From 3c84f721fe198a319e5b76631a044678881cd89b Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Fri, 22 Oct 2021 13:21:38 +0100 Subject: [PATCH 001/146] Update build_and_test.yml --- .github/workflows/build_and_test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 0adb8155ac..38cc8e97ae 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -5,6 +5,7 @@ on: branches: - main - develop + - feature/routing-v3 push: branches: - develop From 39f46d381b40e37d9d37ee00331b308d869e525a Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Mon, 25 Oct 2021 14:48:09 +0100 Subject: [PATCH 002/146] Feature/TokenSwapping (#94) * Copy TokenSwapping CodeBase, update CMakeLists.txt * Add TokenSwapping tests * Update GraphTests to use TokenSwapping RNG * Remove "class RNG;" * Add cpp files to compilation --- tket/src/CMakeLists.txt | 40 + .../src/TokenSwapping/ArchitectureMapping.cpp | 70 + .../src/TokenSwapping/ArchitectureMapping.hpp | 78 + tket/src/TokenSwapping/BestFullTsa.cpp | 52 + tket/src/TokenSwapping/BestFullTsa.hpp | 68 + .../TokenSwapping/CyclesCandidateManager.cpp | 208 ++ .../TokenSwapping/CyclesCandidateManager.hpp | 180 ++ .../src/TokenSwapping/CyclesGrowthManager.cpp | 185 ++ .../src/TokenSwapping/CyclesGrowthManager.hpp | 222 ++ tket/src/TokenSwapping/CyclesPartialTsa.cpp | 91 + tket/src/TokenSwapping/CyclesPartialTsa.hpp | 105 + .../TokenSwapping/CyclicShiftCostEstimate.cpp | 53 + .../TokenSwapping/CyclicShiftCostEstimate.hpp | 64 + .../DistancesFromArchitecture.cpp | 78 + .../DistancesFromArchitecture.hpp | 76 + tket/src/TokenSwapping/DistancesInterface.cpp | 25 + tket/src/TokenSwapping/DistancesInterface.hpp | 56 + .../src/TokenSwapping/DynamicTokenTracker.cpp | 65 + .../src/TokenSwapping/DynamicTokenTracker.hpp | 82 + tket/src/TokenSwapping/HybridTsa00.cpp | 45 + tket/src/TokenSwapping/HybridTsa00.hpp | 51 + .../NeighboursFromArchitecture.cpp | 57 + .../NeighboursFromArchitecture.hpp | 39 + .../src/TokenSwapping/NeighboursInterface.cpp | 15 + .../src/TokenSwapping/NeighboursInterface.hpp | 34 + .../src/TokenSwapping/PartialTsaInterface.cpp | 11 + .../src/TokenSwapping/PartialTsaInterface.hpp | 56 + .../src/TokenSwapping/PathFinderInterface.cpp | 27 + .../src/TokenSwapping/PathFinderInterface.hpp | 74 + tket/src/TokenSwapping/RNG.cpp | 162 + tket/src/TokenSwapping/RNG.hpp | 175 ++ .../src/TokenSwapping/RiverFlowPathFinder.cpp | 179 ++ .../src/TokenSwapping/RiverFlowPathFinder.hpp | 86 + tket/src/TokenSwapping/SwapListOptimiser.cpp | 283 ++ tket/src/TokenSwapping/SwapListOptimiser.hpp | 149 + .../TokenSwapping/TSAUtils/DebugFunctions.cpp | 28 + .../TokenSwapping/TSAUtils/DebugFunctions.hpp | 34 + .../TSAUtils/DistanceFunctions.cpp | 52 + .../TSAUtils/DistanceFunctions.hpp | 76 + .../TSAUtils/GeneralFunctions.cpp | 39 + .../TSAUtils/GeneralFunctions.hpp | 87 + .../TokenSwapping/TSAUtils/SwapFunctions.cpp | 29 + .../TokenSwapping/TSAUtils/SwapFunctions.hpp | 36 + .../TSAUtils/VertexMappingFunctions.cpp | 82 + .../TSAUtils/VertexMappingFunctions.hpp | 74 + .../TSAUtils/VertexSwapResult.cpp | 46 + .../TSAUtils/VertexSwapResult.hpp | 47 + .../TableLookup/CanonicalRelabelling.cpp | 116 + .../TableLookup/CanonicalRelabelling.hpp | 97 + .../TableLookup/ExactMappingLookup.cpp | 124 + .../TableLookup/ExactMappingLookup.hpp | 69 + .../TableLookup/FilteredSwapSequences.cpp | 260 ++ 
.../TableLookup/FilteredSwapSequences.hpp | 126 + .../TableLookup/PartialMappingLookup.cpp | 78 + .../TableLookup/PartialMappingLookup.hpp | 64 + .../TableLookup/SwapConversion.cpp | 76 + .../TableLookup/SwapConversion.hpp | 89 + .../TableLookup/SwapListSegmentOptimiser.cpp | 162 + .../TableLookup/SwapListSegmentOptimiser.hpp | 85 + .../TableLookup/SwapListTableOptimiser.cpp | 223 ++ .../TableLookup/SwapListTableOptimiser.hpp | 79 + .../TableLookup/SwapSequenceTable.cpp | 1408 +++++++++ .../TableLookup/SwapSequenceTable.hpp | 105 + .../TableLookup/VertexMapResizing.cpp | 157 + .../TableLookup/VertexMapResizing.hpp | 108 + tket/src/TokenSwapping/TrivialTSA.cpp | 292 ++ tket/src/TokenSwapping/TrivialTSA.hpp | 209 ++ tket/src/TokenSwapping/VectorListHybrid.hpp | 522 ++++ .../VectorListHybridSkeleton.cpp | 295 ++ .../VectorListHybridSkeleton.hpp | 153 + .../TokenSwapping/main_entry_functions.cpp | 129 + .../TokenSwapping/main_entry_functions.hpp | 52 + tket/tests/Graphs/EdgeSequence.hpp | 4 +- tket/tests/Graphs/RandomGraphGeneration.cpp | 3 +- tket/tests/Graphs/RandomPlanarGraphs.cpp | 3 +- tket/tests/Graphs/RandomPlanarGraphs.hpp | 4 +- tket/tests/Graphs/test_GraphColouring.cpp | 5 +- .../tests/Graphs/test_GraphFindComponents.cpp | 5 +- tket/tests/Graphs/test_GraphFindMaxClique.cpp | 5 +- tket/tests/Graphs/test_RNG.cpp | 5 +- .../Data/FixedCompleteSolutions.cpp | 2630 +++++++++++++++++ .../Data/FixedCompleteSolutions.hpp | 67 + .../TokenSwapping/Data/FixedSwapSequences.cpp | 2197 ++++++++++++++ .../TokenSwapping/Data/FixedSwapSequences.hpp | 78 + .../TableLookup/NeighboursFromEdges.cpp | 26 + .../TableLookup/NeighboursFromEdges.hpp | 51 + .../TableLookup/PermutationTestUtils.cpp | 56 + .../TableLookup/PermutationTestUtils.hpp | 27 + .../SwapSequenceReductionTester.cpp | 142 + .../SwapSequenceReductionTester.hpp | 57 + .../TableLookup/test_CanonicalRelabelling.cpp | 166 ++ .../TableLookup/test_ExactMappingLookup.cpp | 187 ++ .../test_FilteredSwapSequences.cpp | 141 + .../test_SwapSequenceReductions.cpp | 152 + .../TableLookup/test_SwapSequenceTable.cpp | 165 ++ .../TokenSwapping/TestUtils/BestTsaTester.cpp | 153 + .../TokenSwapping/TestUtils/BestTsaTester.hpp | 54 + .../TestUtils/DecodedProblemData.cpp | 167 ++ .../TestUtils/DecodedProblemData.hpp | 64 + .../TestUtils/FullTsaTesting.cpp | 220 ++ .../TestUtils/FullTsaTesting.hpp | 74 + .../TestUtils/PartialTsaTesting.cpp | 138 + .../TestUtils/PartialTsaTesting.hpp | 35 + .../TestUtils/ProblemGeneration.cpp | 115 + .../TestUtils/ProblemGeneration.hpp | 66 + .../TestUtils/TestStatsStructs.cpp | 58 + .../TestUtils/TestStatsStructs.hpp | 44 + .../test_ArchitectureMappingEndToEnd.cpp | 95 + .../test_BestTsaFixedSwapSequences.cpp | 306 ++ .../test_DistancesFromArchitecture.cpp | 67 + tket/tests/TokenSwapping/test_FullTsa.cpp | 242 ++ .../test_RiverFlowPathFinder.cpp | 254 ++ tket/tests/TokenSwapping/test_SwapList.cpp | 41 + .../TokenSwapping/test_SwapListOptimiser.cpp | 482 +++ .../TokenSwapping/test_VariousPartialTsa.cpp | 287 ++ .../TokenSwapping/test_VectorListHybrid.cpp | 243 ++ .../test_VectorListHybridSkeleton.cpp | 295 ++ .../test_main_entry_functions.cpp | 99 + tket/tests/tkettestsfiles.cmake | 27 + 119 files changed, 18727 insertions(+), 24 deletions(-) create mode 100644 tket/src/TokenSwapping/ArchitectureMapping.cpp create mode 100644 tket/src/TokenSwapping/ArchitectureMapping.hpp create mode 100644 tket/src/TokenSwapping/BestFullTsa.cpp create mode 100644 tket/src/TokenSwapping/BestFullTsa.hpp create mode 100644 
tket/src/TokenSwapping/CyclesCandidateManager.cpp create mode 100644 tket/src/TokenSwapping/CyclesCandidateManager.hpp create mode 100644 tket/src/TokenSwapping/CyclesGrowthManager.cpp create mode 100644 tket/src/TokenSwapping/CyclesGrowthManager.hpp create mode 100644 tket/src/TokenSwapping/CyclesPartialTsa.cpp create mode 100644 tket/src/TokenSwapping/CyclesPartialTsa.hpp create mode 100644 tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp create mode 100644 tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp create mode 100644 tket/src/TokenSwapping/DistancesFromArchitecture.cpp create mode 100644 tket/src/TokenSwapping/DistancesFromArchitecture.hpp create mode 100644 tket/src/TokenSwapping/DistancesInterface.cpp create mode 100644 tket/src/TokenSwapping/DistancesInterface.hpp create mode 100644 tket/src/TokenSwapping/DynamicTokenTracker.cpp create mode 100644 tket/src/TokenSwapping/DynamicTokenTracker.hpp create mode 100644 tket/src/TokenSwapping/HybridTsa00.cpp create mode 100644 tket/src/TokenSwapping/HybridTsa00.hpp create mode 100644 tket/src/TokenSwapping/NeighboursFromArchitecture.cpp create mode 100644 tket/src/TokenSwapping/NeighboursFromArchitecture.hpp create mode 100644 tket/src/TokenSwapping/NeighboursInterface.cpp create mode 100644 tket/src/TokenSwapping/NeighboursInterface.hpp create mode 100644 tket/src/TokenSwapping/PartialTsaInterface.cpp create mode 100644 tket/src/TokenSwapping/PartialTsaInterface.hpp create mode 100644 tket/src/TokenSwapping/PathFinderInterface.cpp create mode 100644 tket/src/TokenSwapping/PathFinderInterface.hpp create mode 100644 tket/src/TokenSwapping/RNG.cpp create mode 100644 tket/src/TokenSwapping/RNG.hpp create mode 100644 tket/src/TokenSwapping/RiverFlowPathFinder.cpp create mode 100644 tket/src/TokenSwapping/RiverFlowPathFinder.hpp create mode 100644 tket/src/TokenSwapping/SwapListOptimiser.cpp create mode 100644 tket/src/TokenSwapping/SwapListOptimiser.hpp create mode 100644 tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp create mode 100644 tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp create mode 100644 tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp create mode 100644 tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp create mode 100644 tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp create mode 100644 tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp create mode 100644 tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp create mode 100644 tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp create mode 100644 tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp create mode 100644 tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp create mode 100644 tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp create mode 100644 tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/SwapConversion.cpp create mode 100644 
tket/src/TokenSwapping/TableLookup/SwapConversion.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp create mode 100644 tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp create mode 100644 tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp create mode 100644 tket/src/TokenSwapping/TrivialTSA.cpp create mode 100644 tket/src/TokenSwapping/TrivialTSA.hpp create mode 100644 tket/src/TokenSwapping/VectorListHybrid.hpp create mode 100644 tket/src/TokenSwapping/VectorListHybridSkeleton.cpp create mode 100644 tket/src/TokenSwapping/VectorListHybridSkeleton.hpp create mode 100644 tket/src/TokenSwapping/main_entry_functions.cpp create mode 100644 tket/src/TokenSwapping/main_entry_functions.hpp create mode 100644 tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp create mode 100644 tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp create mode 100644 tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp create mode 100644 tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp create mode 100644 tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp create mode 100644 tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp create mode 100644 tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp create mode 100644 tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp create mode 100644 tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp create mode 100644 tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp create mode 100644 tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp create mode 100644 tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp create mode 100644 tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp create mode 100644 tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp create mode 100644 tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp create mode 100644 tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp create mode 100644 tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp create mode 100644 tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp create mode 100644 tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp create mode 100644 tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp create mode 100644 tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp create mode 100644 tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp create mode 100644 tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp create mode 100644 
tket/tests/TokenSwapping/test_FullTsa.cpp create mode 100644 tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp create mode 100644 tket/tests/TokenSwapping/test_SwapList.cpp create mode 100644 tket/tests/TokenSwapping/test_SwapListOptimiser.cpp create mode 100644 tket/tests/TokenSwapping/test_VariousPartialTsa.cpp create mode 100644 tket/tests/TokenSwapping/test_VectorListHybrid.cpp create mode 100644 tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp create mode 100644 tket/tests/TokenSwapping/test_main_entry_functions.cpp diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index 71b650d14b..e747dd8798 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -74,6 +74,7 @@ set(TKET_OPS_DIR ${TKET_SRC_DIR}/Ops) set(TKET_GATE_DIR ${TKET_SRC_DIR}/Gate) set(TKET_SIMULATION_DIR ${TKET_SRC_DIR}/Simulation) set(TKET_ROUTING_DIR ${TKET_SRC_DIR}/Routing) +set(TKET_TOKEN_SWAPPING_DIR ${TKET_SRC_DIR}/TokenSwapping) set(TKET_TRANSFORM_DIR ${TKET_SRC_DIR}/Transformations) set(TKET_CHARACTERISATION_DIR ${TKET_SRC_DIR}/Characterisation) set(TKET_PREDS_DIR ${TKET_SRC_DIR}/Predicates) @@ -156,6 +157,45 @@ set(TKET_SOURCES ${TKET_GRAPH_DIR}/ArticulationPoints.cpp ${TKET_GRAPH_DIR}/UIDConnectivity.cpp + # Token swapping + ${TKET_TOKEN_SWAPPING_DIR}/TSAUtils/DebugFunctions.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TSAUtils/DistanceFunctions.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TSAUtils/GeneralFunctions.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TSAUtils/SwapFunctions.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TSAUtils/VertexMappingFunctions.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TSAUtils/VertexSwapResult.cpp + ${TKET_TOKEN_SWAPPING_DIR}/ArchitectureMapping.cpp + ${TKET_TOKEN_SWAPPING_DIR}/BestFullTsa.cpp + ${TKET_TOKEN_SWAPPING_DIR}/CyclesCandidateManager.cpp + ${TKET_TOKEN_SWAPPING_DIR}/CyclesGrowthManager.cpp + ${TKET_TOKEN_SWAPPING_DIR}/CyclesPartialTsa.cpp + ${TKET_TOKEN_SWAPPING_DIR}/CyclicShiftCostEstimate.cpp + ${TKET_TOKEN_SWAPPING_DIR}/DistancesFromArchitecture.cpp + ${TKET_TOKEN_SWAPPING_DIR}/DistancesInterface.cpp + ${TKET_TOKEN_SWAPPING_DIR}/DynamicTokenTracker.cpp + ${TKET_TOKEN_SWAPPING_DIR}/HybridTsa00.cpp + ${TKET_TOKEN_SWAPPING_DIR}/main_entry_functions.cpp + ${TKET_TOKEN_SWAPPING_DIR}/NeighboursFromArchitecture.cpp + ${TKET_TOKEN_SWAPPING_DIR}/NeighboursInterface.cpp + ${TKET_TOKEN_SWAPPING_DIR}/PartialTsaInterface.cpp + ${TKET_TOKEN_SWAPPING_DIR}/PathFinderInterface.cpp + ${TKET_TOKEN_SWAPPING_DIR}/RiverFlowPathFinder.cpp + ${TKET_TOKEN_SWAPPING_DIR}/RNG.cpp + ${TKET_TOKEN_SWAPPING_DIR}/SwapListOptimiser.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TrivialTSA.cpp + ${TKET_TOKEN_SWAPPING_DIR}/VectorListHybridSkeleton.cpp + + # Token swapping table lookup + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/CanonicalRelabelling.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/ExactMappingLookup.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/FilteredSwapSequences.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/PartialMappingLookup.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/SwapConversion.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/SwapListSegmentOptimiser.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/SwapListTableOptimiser.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/SwapSequenceTable.cpp + ${TKET_TOKEN_SWAPPING_DIR}/TableLookup/VertexMapResizing.cpp + # Transformations ${TKET_TRANSFORM_DIR}/Combinator.cpp ${TKET_TRANSFORM_DIR}/Rebase.cpp diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp new file mode 100644 index 0000000000..5d35024677 --- /dev/null +++ 
b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -0,0 +1,70 @@ +#include "ArchitectureMapping.hpp" + +#include <sstream> +#include <stdexcept> + +namespace tket { +namespace tsa_internal { + +ArchitectureMapping::ArchitectureMapping(const Architecture& arch) + : m_arch(arch) { + const auto uids = arch.get_all_uids(); + m_vertex_to_node_mapping.reserve(uids.size()); + for (const UnitID& uid : uids) { + m_vertex_to_node_mapping.emplace_back(Node(uid)); + } + + for (size_t ii = 0; ii < m_vertex_to_node_mapping.size(); ++ii) { + const auto& node = m_vertex_to_node_mapping[ii]; + { + const auto citer = m_node_to_vertex_mapping.find(node); + if (citer != m_node_to_vertex_mapping.cend()) { + std::stringstream ss; + ss << "Duplicate node " << node.repr() << " at vertices " + << citer->second << ", " << ii; + throw std::runtime_error(ss.str()); + } + } + m_node_to_vertex_mapping[node] = ii; + } +} + +size_t ArchitectureMapping::number_of_vertices() const { + return m_vertex_to_node_mapping.size(); +} + +const Node& ArchitectureMapping::get_node(size_t vertex) const { + const auto num_vertices = number_of_vertices(); + if (vertex >= num_vertices) { + std::stringstream ss; + ss << "get_node: invalid vertex " << vertex << " (architecture only has " + << num_vertices << " vertices)"; + throw std::runtime_error(ss.str()); + } + return m_vertex_to_node_mapping[vertex]; +} + +size_t ArchitectureMapping::get_vertex(const Node& node) const { + const auto citer = m_node_to_vertex_mapping.find(node); + if (citer == m_node_to_vertex_mapping.cend()) { + std::stringstream ss; + ss << "get_vertex: node " << node.repr() << " has no vertex number"; + throw std::runtime_error(ss.str()); + } + return citer->second; +} + +const Architecture& ArchitectureMapping::get_architecture() const { + return m_arch; +} + +std::vector<Swap> ArchitectureMapping::get_edges() const { + std::vector<Swap> edges; + for (auto [node1, node2] : m_arch.get_connections_vec()) { + edges.emplace_back(get_swap(get_vertex(node1), get_vertex(node2))); + } + return edges; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/ArchitectureMapping.hpp new file mode 100644 index 0000000000..3b56f3e45a --- /dev/null +++ b/tket/src/TokenSwapping/ArchitectureMapping.hpp @@ -0,0 +1,78 @@ +#ifndef _TKET_TokenSwapping_ArchitectureMapping_H_ +#define _TKET_TokenSwapping_ArchitectureMapping_H_ + +#include "Architecture/Architectures.hpp" +#include "TSAUtils/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** For mapping between nodes in an architecture and size_t vertex numbers. + * The vertex numbers are merely the indices of each Node + * within the vector returned by the get_all_uids() function. + * + * For now, we don't want to use Node objects as (1) this would make + * TokenSwapping dependent on other parts of Tket and hence less modular, + * (2) it would probably slow things down significantly because Nodes + * contain extra data, like vectors and strings, which are relatively + * expensive to copy; vertices get copied and moved around many times + * by any TSA. + * + * TODO: it would be better to use a Vertex wrapper class + * instead of raw size_t. (Also, might change to unsigned instead of size_t). + */ +class ArchitectureMapping { + public: + /** The object must remain valid and unchanged + * throughout the life of this object. + * @param arch The finished Architecture object, must remain valid + * for the lifetime of this object.
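A minimal usage sketch (illustrative only, not part of the patch; "arch" stands for any already-constructed Architecture object, and TKET_ASSERT is the assertion macro from Utils/Assert.hpp used elsewhere in this patch):

    const ArchitectureMapping arch_mapping(arch);
    for (size_t vertex = 0; vertex < arch_mapping.number_of_vertices(); ++vertex) {
      // get_node and get_vertex are mutually inverse.
      const Node& node = arch_mapping.get_node(vertex);
      TKET_ASSERT(arch_mapping.get_vertex(node) == vertex);
    }
    // Edges are reported using the new vertex numbers, as Swap objects.
    const std::vector<Swap> edges = arch_mapping.get_edges();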
+ */ + explicit ArchitectureMapping(const Architecture& arch); + + /** Convenient reference to the Architecture object we used + * to construct this ArchitectureMapping. + */ + const Architecture& get_architecture() const; + + /** The number of vertices in the Architecture. + * @return The number of vertices + */ + size_t number_of_vertices() const; + + /** Get the newly created vertex assigned to the node. + * Throws if the node is invalid. + * @param node The node within the original Architecture object + * @return The newly created vertex representing this node + */ + size_t get_vertex(const Node& node) const; + + /** Reverse of "get_vertex", throws if the vertex is invalid. + * @param vertex The vertex created by this ArchitectureMapping object. + * @return The node corresponding to this vertex. + */ + const Node& get_node(size_t vertex) const; + + /** Get the edges using the vertices created by this ArchitectureMapping + * object. The vertex numbers, of course, do not necessarily match with + * the Node uids of the underlying architecture object + * (that's why we have a mapping). + * @return The vector of edges in the architecture, using the new + * vertex numbers. + */ + std::vector get_edges() const; + + private: + /// Store a reference to the Architecture passed into the constructor. + const Architecture& m_arch; + + /// Element i is simply the node corresponding to vertex i. + node_vector_t m_vertex_to_node_mapping; + + /// Reverse of m_vertex_to_node_mapping; look up the index of a node. + std::map m_node_to_vertex_mapping; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp new file mode 100644 index 0000000000..a70a20fc6a --- /dev/null +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -0,0 +1,52 @@ +#include "BestFullTsa.hpp" + +#include "DistancesFromArchitecture.hpp" +#include "NeighboursFromArchitecture.hpp" +#include "RiverFlowPathFinder.hpp" +#include "TableLookup/VertexMapResizing.hpp" + +namespace tket { +namespace tsa_internal { + +BestFullTsa::BestFullTsa() { m_name = "BestFullTsa"; } + +HybridTsa00& BestFullTsa::get_hybrid_tsa_for_testing() { return m_hybrid_tsa; } + +void BestFullTsa::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + const ArchitectureMapping& arch_mapping) { + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RiverFlowPathFinder path_finder(distances, neighbours, m_rng); + m_rng.set_seed(); + append_partial_solution( + swaps, vertex_mapping, distances, neighbours, path_finder); +} + +void BestFullTsa::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + PathFinderInterface& path_finder) { + auto vm_copy = vertex_mapping; + + m_hybrid_tsa.append_partial_solution( + swaps, vm_copy, distances, neighbours, path_finder); + + // Still subject to experimentation, but this seems the best + m_swap_list_optimiser.optimise_pass_with_zero_travel(swaps); + m_swap_list_optimiser.optimise_pass_with_token_tracking(swaps); + m_swap_list_optimiser.optimise_pass_remove_empty_swaps(swaps, vertex_mapping); + m_swap_list_optimiser.full_optimise(swaps, vertex_mapping); + + VertexMapResizing map_resizing(neighbours); + std::set vertices_with_tokens_at_start; + for (const auto& entry : vertex_mapping) { + vertices_with_tokens_at_start.insert(entry.first); + } + m_table_optimiser.optimise( + 
vertices_with_tokens_at_start, map_resizing, swaps, + m_swap_list_optimiser); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/BestFullTsa.hpp new file mode 100644 index 0000000000..2ed5baf555 --- /dev/null +++ b/tket/src/TokenSwapping/BestFullTsa.hpp @@ -0,0 +1,68 @@ +#ifndef _TKET_TokenSwapping_BestFullTsa_H_ +#define _TKET_TokenSwapping_BestFullTsa_H_ +#include "ArchitectureMapping.hpp" +#include "HybridTsa00.hpp" +#include "RNG.hpp" +#include "SwapListOptimiser.hpp" +#include "TableLookup/SwapListTableOptimiser.hpp" + +namespace tket { +namespace tsa_internal { + +/** To enable easier experimentation, keep this up-to-date with the best + * end-to-end known default options, but also make it possible to change + * the options. + * Also include the best known postprocessing swap list optimisations. + */ +class BestFullTsa : public PartialTsaInterface { + public: + BestFullTsa(); + + /** We emphasise that, unlike the general PartialTsaInterface, the solution + * returned is complete, AND includes all known swap list optimisations. + * Warning: unlike most PartialTsaInterface objects, the vertex_mapping + * is NOT updated. (There's no point for a full TSA). + * @param swaps The list of swaps to append to (does not clear first). + * @param vertex_mapping The current desired mapping, giving (current source + * vertex)->(target vertex) mappings. NOT updated at the end. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. (Of course, paths might not be unique if the graph + * is not a tree). + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + PathFinderInterface& path_finder) override; + + /** Wrapper around the main append_partial_solution function, but constructing + * and using the best known PathFinderInterface object. The DistancesInterface + * and NeighboursInterface objects will automatically be constructed. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping. Will be updated with + * the new added swaps. + * @param arch_mapping An ArchitectureMapping object, which knows the graph, + * how to do Node <-> vertex size_t conversions, etc. + */ + void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + const ArchitectureMapping& arch_mapping); + + /** For experiments, provide access to the internal stored TSA object. This + * function may be deleted later! + * @return Reference to the internal stored TSA object. 
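For orientation, a sketch of how the architecture-based overload above might be driven end to end (illustrative only; "arch" is any Architecture object, and the vertex numbers used in the VertexMapping are those assigned by the ArchitectureMapping):

    ArchitectureMapping arch_mapping(arch);
    // Desired relabelling: the token currently on vertex 0 should end up on
    // vertex 2 and vice versa; unmentioned vertices carry no token.
    VertexMapping vertex_mapping{{0, 2}, {2, 0}};
    SwapList swaps;
    BestFullTsa().append_partial_solution(swaps, vertex_mapping, arch_mapping);
    // "swaps" now holds the complete sequence of vertex pairs to swap, in order.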
+ */ + HybridTsa00& get_hybrid_tsa_for_testing(); + + private: + HybridTsa00 m_hybrid_tsa; + SwapListOptimiser m_swap_list_optimiser; + SwapListTableOptimiser m_table_optimiser; + RNG m_rng; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp new file mode 100644 index 0000000000..d430527cf5 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -0,0 +1,208 @@ +#include "CyclesCandidateManager.hpp" + +#include +#include +#include + +#include "TSAUtils/VertexSwapResult.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +CyclesCandidateManager::Options& CyclesCandidateManager::get_options() { + return m_options; +} + +size_t CyclesCandidateManager::fill_initial_cycle_ids(const Cycles& cycles) { + m_cycle_with_vertex_hash.clear(); + m_cycles_to_keep.clear(); + size_t cycle_length = 0; + for (auto id_opt = cycles.front_id(); id_opt; + id_opt = cycles.next(id_opt.value())) { + const auto& cycle = cycles.at(id_opt.value()); + const auto& vertices = cycle.vertices; + + if (cycle_length == 0) { + cycle_length = vertices.size(); + if (cycle_length < 2) { + throw std::runtime_error("Cycles too small"); + } + } else { + if (cycle_length != vertices.size()) { + throw std::runtime_error("Differing cycle sizes"); + } + } + if (cycle.decrease <= 0) { + throw std::runtime_error("Bad candidates stored"); + } + // We want 50*(decrease)/(num swaps) >= min_candidate_power_percentage. + // (We multiply by 50 because a swap can change L by 2, not 1). + if (50 * static_cast(cycle.decrease) < + (m_options.min_candidate_power_percentage * cycle_length)) { + continue; + } + + CycleData cycle_data; + cycle_data.id = id_opt.value(); + cycle_data.first_vertex_index = 0; + for (size_t ii = 1; ii < vertices.size(); ++ii) { + if (vertices[ii] < vertices[cycle_data.first_vertex_index]) { + cycle_data.first_vertex_index = ii; + } + } + size_t hash = static_cast(cycle.decrease); + for (size_t ii = 0; ii < cycle_length; ++ii) { + boost::hash_combine( + hash, vertices[(ii + cycle_data.first_vertex_index) % cycle_length]); + } + const auto prev_cycle_citer = m_cycle_with_vertex_hash.find(hash); + if (prev_cycle_citer == m_cycle_with_vertex_hash.cend()) { + m_cycle_with_vertex_hash[hash] = cycle_data; + } else { + // A previous cycle with this hash; but is it equal? 
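// (Worked example of the rotation-invariant comparison below, illustrative:
// the stored vertex lists [3, 7, 2] and [2, 3, 7] describe the same cyclic
// order. Both are hashed and compared starting from their smallest vertex, 2,
// giving the sequence 2, 3, 7 in each case, so the later copy is discarded.
// By contrast [7, 3, 2] reverses the direction, gives the sequence 2, 7, 3,
// and is correctly treated as a different cycle.)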
+ const auto& previous_cycle_data = prev_cycle_citer->second; + const auto& previous_cycle = cycles.at(previous_cycle_data.id); + if (previous_cycle.decrease == cycle.decrease) { + bool equal_vertices = true; + for (size_t ii = 0; ii < cycle_length; ++ii) { + if (previous_cycle.vertices + [(ii + previous_cycle_data.first_vertex_index) % + cycle_length] != + cycle.vertices + [(ii + cycle_data.first_vertex_index) % cycle_length]) { + equal_vertices = false; + break; + } + } + if (equal_vertices) { + // This new cycle is just the previous cycle repeated, + // but starting from a different vertex + continue; + } + } + } + m_cycles_to_keep.push_back(cycle_data.id); + } + return cycle_length; +} + +void CyclesCandidateManager::discard_lower_power_solutions( + const Cycles& cycles) { + int highest_decrease = 0; + for (auto id : m_cycles_to_keep) { + highest_decrease = std::max(highest_decrease, cycles.at(id).decrease); + } + if (highest_decrease <= 0) { + throw std::runtime_error("No good candidate cycles"); + } + for (size_t ii = 0; ii < m_cycles_to_keep.size();) { + if (cycles.at(m_cycles_to_keep[ii]).decrease < highest_decrease) { + // This cycle is not good enough. + // Erase this ID, by swapping with the back + m_cycles_to_keep[ii] = m_cycles_to_keep.back(); + m_cycles_to_keep.pop_back(); + continue; + } + // Keep this ID. Onto the next! + ++ii; + } +} + +void CyclesCandidateManager::sort_candidates(const Cycles& cycles) { + // Greedy heuristic: we want the maximal number of disjoint cycles. + // So, choose those which touch few others first. + // Experimentation is needed with other algorithms! + m_touching_data.clear(); + for (size_t ii = 0; ii < m_cycles_to_keep.size(); ++ii) { + // Automatically set to zero on first use. + m_touching_data[m_cycles_to_keep[ii]]; + + for (size_t jj = ii + 1; jj < m_cycles_to_keep.size(); ++jj) { + bool touches = false; + // For short cycles, not much slower than using sets + // or sorted vectors. + for (auto v1 : cycles.at(m_cycles_to_keep[ii]).vertices) { + if (touches) { + break; + } + for (auto v2 : cycles.at(m_cycles_to_keep[jj]).vertices) { + if (v1 == v2) { + touches = true; + break; + } + } + } + if (touches) { + ++m_touching_data[m_cycles_to_keep[ii]]; + ++m_touching_data[m_cycles_to_keep[jj]]; + } + } + } + // Now, sort... + auto& touching_data = m_touching_data; + std::sort( + m_cycles_to_keep.begin(), m_cycles_to_keep.end(), + [&touching_data](Cycles::ID lhs, Cycles::ID rhs) { + const auto lhs_touch_number = touching_data.at(lhs); + const auto rhs_touch_number = touching_data.at(rhs); + + // Don't JUST sort on the touch number, because then the order + // of equal-touch-number elements would be implementation dependent + // (i.e., not a "stable" sort across all platforms/compilers). 
+ return (lhs_touch_number < rhs_touch_number) || + (lhs_touch_number == rhs_touch_number && lhs < rhs); + }); +} + +bool CyclesCandidateManager::should_add_swaps_for_candidate( + const Cycles& cycles, Cycles::ID id) { + const auto& cycle = cycles.at(id); + const auto& vertices = cycle.vertices; + for (auto v : vertices) { + if (m_vertices_used.count(v) != 0) { + return false; + } + } + for (auto v : vertices) { + m_vertices_used.insert(v); + } + return true; +} + +void CyclesCandidateManager::append_partial_solution( + const CyclesGrowthManager& growth_manager, SwapList& swaps, + VertexMapping& vertex_mapping) { + const auto& cycles = growth_manager.get_cycles(); + const size_t cycle_size = fill_initial_cycle_ids(cycles); + + if (m_cycles_to_keep.empty()) { + return; + } + const bool keep_lower_power_solutions = + (cycle_size == 2) + ? m_options.return_all_good_single_swaps + : m_options.return_lower_power_solutions_for_multiswap_candidates; + + if (!keep_lower_power_solutions) { + discard_lower_power_solutions(cycles); + } + sort_candidates(cycles); + m_vertices_used.clear(); + + // It's the final function, so don't bother erasing + // elements in m_cycles_to_keep. + for (auto id : m_cycles_to_keep) { + if (!should_add_swaps_for_candidate(cycles, id)) { + continue; + } + const auto& vertices = cycles.at(id).vertices; + for (size_t ii = vertices.size() - 1; ii > 0; --ii) { + VertexSwapResult(vertices[ii], vertices[ii - 1], vertex_mapping, swaps); + } + } +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.hpp b/tket/src/TokenSwapping/CyclesCandidateManager.hpp new file mode 100644 index 0000000000..769d70ce68 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesCandidateManager.hpp @@ -0,0 +1,180 @@ +#ifndef _TKET_TokenSwapping_CyclesCandidateManager_H_ +#define _TKET_TokenSwapping_CyclesCandidateManager_H_ + +#include + +#include "CyclesGrowthManager.hpp" +#include "PartialTsaInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** Concerned with filtering and selecting candidate cycles + * to convert into a swap sequence. Used by CyclesPartialTsa. + * For further explanation, please see the comments for the class + * CyclesPartialTsa. + * + * This is used when all cycles are valid candidates to be converted + * into swap sequences. This class selects the ones to use. + * All cycle candidates are assumed to have the same length + * (swaps are just cycles on 2 vertices), but have different "power", + * i.e. different overall contribution to the decrease of L, the sum of + * the distances between the current vertex of each token and its target. + * + * We only want to return solutions which strictly decrease L, so that + * we're guaranteed to make progress (or make no change). + * We must select a subset of disjoint cycles, since if they + * were not disjoint, the returned solution might not decrease L. + * (We based all our calculations on treating the cycles individually, + * so obviously non-disjoint cycles could behave very differently). + */ +class CyclesCandidateManager { + public: + /** These control the behaviour of filtering for candidate selection. + * Experimentation needed to find the best options. + */ + struct Options { + // In both these options, we have a whole collection of candidate + // swap sequences. + // We can EITHER perform just the best single candidate, + // OR carry out multiple swap sequences simultaneously, + // by selecting a large disjoint subset. 
+ // However, returning multiple sequences, although probably faster + // to compute overall, might give a worse end-to-end solution + // (but this needs testing). (But of course it may actually be slower. + // All these are just guesses, need testing!) + // The reason is that, once the tokens + // have shifted a little bit, it may enable better solutions + // (sequences of higher power) which the algorithm previously + // did not detect. + + /** Setting this to "false" means that only the best single swaps + * will be returned, the others being discarded. (E.g., if some swaps + * move two tokens closer to home, i.e. have "power" two, then + * "power one" swaps - those which only move one token closer to home, + * the other token being empty, or remaining at the same distance from + * its target - will be discarded). + */ + bool return_all_good_single_swaps = false; + + /** The same as "return_all_good_single_swaps", but for cycles + * on >= 3 vertices. Do we return ALL cycle solutions, or only those + * which decrease L by the largest amount? + */ + bool return_lower_power_solutions_for_multiswap_candidates = false; + + /** The "power" of a swap sequence is (total L decrease) / (number of + * swaps). Since a swap can change L by -2,-1,0,1,2 (since up to 2 tokens + * are moved one step), always |power| <= 2. But let's assume that negative + * power candidates are discarded, and rescale to be a percentage. Discard + * all candidates with power percentage smaller than this. Note that only + * fairly dense problems (lots of tokens, or all clustered close together) + * are likely to give higher powers; if all tokens are far apart, or there + * are very few of them, then swapping two nonempty tokens is rare, so + * immediately most candidates would not expect to reach even 50% power. + */ + unsigned min_candidate_power_percentage = 0; + }; + + /// Provide access to the options used, to change them if desired. + Options& get_options(); + + /** The "CyclesGrowthManager" object stores the candidate cycles internally, + * then we select the set of candidates to use, convert them into swaps, + * and append them to the list of swaps. (All distance data has already + * been calculated and ctored within the cycles). + * + * @param growth_manager The object containing the candidate cycles + * @param swaps The list of swaps we will add to, once we convert + * the candidates into swaps. + * @param vertex_mapping The current vertex->target vertex mapping, + * which will be updated with the added swaps. + */ + void append_partial_solution( + const CyclesGrowthManager& growth_manager, SwapList& swaps, + VertexMapping& vertex_mapping); + + private: + Options m_options; + + /** Information about the stored candidates, for filtering. */ + struct CycleData { + Cycles::ID id; + + /** The vertices are listed in a vector. + * Store the index, in the vector, of the lowest valued vertex. + * The purpose is to detect duplicate stored cycles (starting from + * a different vertex) and discard all but one of them. + * (Unfortunately necessary because, as cycles are being built up, + * we don't know which final vertices will occur, so we can get many + * duplicate subpaths. Is there a clever data structure to improve this?) + */ + size_t first_vertex_index; + }; + + /** Key: a hash of the vertices in the cycle + * Value: information about the candidate cycles of the last cycle + * with that hash. 
(Hash collisions are expected to be very rare, and they + * cause no actual problem, so it's probably faster not to use complete + * buckets to resolve hash collisions). + * Used to find duplicate cycles (the same vertices in the same cyclic + * order, but with different start vertex in the vector). + */ + std::map m_cycle_with_vertex_hash; + + /** We will discard duplicate cycles. For better constness, we don't delete + * cycles, we just store the IDs of those ones we want to use. + */ + std::vector m_cycles_to_keep; + + /** Key: a cycle ID + * Value: how many other cycles it touches (i.e., cycles sharing a vertex + * with it, so not disjoint). + * This will be used to select a large subset of pairwise disjoint + * cycles, with a simple greedy algorithm. + */ + std::map m_touching_data; + + /** Used by should_add_swaps_for_candidate, to see whether a cycle + * is disjoint from those already selected. + */ + std::set m_vertices_used; + + /** Fills m_cycles_to_keep (so, effectively discarding unsuitable cycles), + * returns the common cycle length. + * @param cycles The complete collection of candidate cycles. + * @return The number of vertices in each cycle + * (all cycles should be the same length). + */ + size_t fill_initial_cycle_ids(const Cycles& cycles); + + /** Updates m_cycles_to_keep. Keep only those solutions with the + * highest L-decrease. + * @param cycles The complete collection of candidate cycles, + * but we already have filled m_cycles_to_keep so will + * only consider those cycles. + */ + void discard_lower_power_solutions(const Cycles& cycles); + + /** Sorts m_cycles_to_keep so that those which touch + * the fewest other cycles are listed first. + * @param cycles The complete collection of candidate cycles, + * but we only consider those cycles with IDs in m_cycles_to_keep. + */ + void sort_candidates(const Cycles& cycles); + + /** Checks if the candidate is disjoint from all other candidates + * currently used (stored in m_vertices_used). If so updates + * m_vertices_used and returns true (but takes no other action). + * Otherwise, do nothing and return false. + * @param cycles The complete collection of candidate cycles. + * @param id The single cycle under consideration. + * @return whether this single cycle should be added to the collection + * of candidates. 
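For instance (an illustrative example), if the sorted candidates are the cycles [0, 1, 2], [2, 3, 4] and [5, 6], the first is accepted, the second is rejected because it shares vertex 2 with an already-accepted cycle, and the third is accepted because it is disjoint from everything chosen so far.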
+ */ + bool should_add_swaps_for_candidate(const Cycles& cycles, Cycles::ID id); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp new file mode 100644 index 0000000000..6770c871af --- /dev/null +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -0,0 +1,185 @@ +#include "CyclesGrowthManager.hpp" + +#include + +#include "TSAUtils/DistanceFunctions.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +bool Cycle::contains(size_t vertex) const { + for (auto vv : vertices) { + if (vertex == vv) { + return true; + } + } + return false; +} + +CyclesGrowthManager::Options& CyclesGrowthManager::get_options() { + return m_options; +} + +const Cycles& CyclesGrowthManager::get_cycles( + bool throw_if_cycles_are_not_candidates) const { + if (throw_if_cycles_are_not_candidates && !m_cycles_are_candidates) { + throw std::runtime_error("get_cycles called with non-candidate cycles"); + } + return m_cycles; +} + +bool CyclesGrowthManager::reset( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours) { + m_cycles.clear(); + m_cycles_are_candidates = false; + + // OK, a bit inefficient, every really good swap (decreasing L by 2) + // will appear twice, but not a disaster. + // If no such swap exists, then this stored data will be necessary, because + // direction matters in longer cycles: v0->v1->v2->v0 is very different + // from v2->v1->v0->v2. + // It's simplest just to treat swaps as special cases of cycles, on 2 + // vertices. + for (const auto& entry : vertex_mapping) { + const auto source = entry.first; + const auto target = entry.second; + const auto source_distance_to_target = distances(source, target); + if (source_distance_to_target == 0) { + continue; + } + const auto& adj_vertices = neighbours(source); + for (auto adj_v : adj_vertices) { + const auto other_v_distance_to_target = distances(adj_v, target); + if (other_v_distance_to_target < source_distance_to_target) { + const auto new_id = m_cycles.emplace_back(); + auto& cycle = m_cycles.at(new_id); + cycle.decrease = 1; + cycle.vertices.resize(2); + cycle.vertices[0] = source; + cycle.vertices[1] = adj_v; + if (m_cycles.size() >= m_options.max_number_of_cycles) { + return true; + } + } + } + } + return !m_cycles.empty(); +} + +bool CyclesGrowthManager::attempt_to_close_cycles( + const VertexMapping& vertex_mapping, DistancesInterface& distances) { + if (m_cycles_are_candidates) { + throw std::runtime_error( + "Calling attempt_to_close_cycles when we already have " + "candidates"); + } + for (auto id_opt = m_cycles.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = m_cycles.next(id); + auto& cycle = m_cycles.at(id); + const int decrease = get_move_decrease( + vertex_mapping, cycle.vertices.back(), cycle.vertices[0], distances); + const int new_decrease = cycle.decrease + decrease; + if (new_decrease > 0) { + cycle.decrease = new_decrease; + if (!m_cycles_are_candidates) { + // It's the first good one, so delete all previous. + for (auto prev_id_opt = m_cycles.previous(id); prev_id_opt;) { + const auto id_to_be_deleted = prev_id_opt.value(); + prev_id_opt = m_cycles.previous(id_to_be_deleted); + m_cycles.erase(id_to_be_deleted); + } + } + m_cycles_are_candidates = true; + } else { + // Not a good closed cycle; do we delete it? 
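// (Answer: only if a good closed candidate already exists. Once candidates
// are available, cycles that fail to close well are useless and are erased.
// Before any candidate has been found they are kept, since attempt_to_grow
// may later extend them into longer cycles whose closure does decrease L.)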
+ if (m_cycles_are_candidates) { + m_cycles.erase(id); + } + } + } + return m_cycles_are_candidates; +} + +CyclesGrowthManager::GrowthResult CyclesGrowthManager::attempt_to_grow( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours) { + GrowthResult result; + + if (m_cycles.empty()) { + throw std::runtime_error("Calling attempt_to_grow with no cycles stored"); + } + if (m_cycles.front().vertices.size() >= m_options.max_cycle_size) { + m_cycles.clear(); + result.hit_cycle_length_limit = true; + result.empty = true; + return result; + } + for (auto id_opt = m_cycles.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = m_cycles.next(id); + + // Add an arrow onto the back. + const auto back_vertex = m_cycles.at(id).vertices.back(); + const auto& adj_vertices = neighbours(back_vertex); + for (auto adj_v : adj_vertices) { + int new_decr; + { + // Important not to reuse this once cycles are added, + // as it may be invalidated + const auto& cycle = m_cycles.at(id); + if (cycle.contains(adj_v)) { + continue; + } + new_decr = + cycle.decrease + + get_move_decrease(vertex_mapping, back_vertex, adj_v, distances); + + // If there are N moves, each move can only decrease L by at most one, + // so it's unfair to demand a huge L-decrease, because shorter cycles + // would be killed immediately. + // With N vertices there are N-1 moves, but we are about to add + // the new vertex adj_v to this partial cycle (unless we discard it), + // taking it back up to N. + const int num_moves = cycle.vertices.size(); + int min_decrease = num_moves; + min_decrease = + std::min(min_decrease, m_options.min_decrease_for_partial_path); + + // We want 100*(L-decr)/(num.moves) >= + // min_power_percentage_for_partial_path. But we need the ceiling + // because of interger division. + min_decrease = std::max( + min_decrease, + (99 + m_options.min_power_percentage_for_partial_path * num_moves) / + 100); + + if (new_decr < min_decrease) { + continue; + } + } + // A new cycle to be added. Add it before the current position, + // so we won't pass through it again in the main loop. + const auto new_id = m_cycles.insert_before(id); + auto& new_cycle = m_cycles.at(new_id); + new_cycle.decrease = new_decr; + new_cycle.vertices = m_cycles.at(id).vertices; + new_cycle.vertices.push_back(adj_v); + if (m_cycles.size() >= m_options.max_number_of_cycles) { + // Break out of the INNER loop, i.e. neighbours for this + // cycle endpoint. However, this cycle is about to be deleted, + // creating space, so continue with further cycles. + break; + } + } + m_cycles.erase(id); + } + result.empty = m_cycles.empty(); + return result; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.hpp b/tket/src/TokenSwapping/CyclesGrowthManager.hpp new file mode 100644 index 0000000000..670b19da29 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesGrowthManager.hpp @@ -0,0 +1,222 @@ +#ifndef _TKET_TokenSwapping_CyclesGrowthManager_H_ +#define _TKET_TokenSwapping_CyclesGrowthManager_H_ + +#include "DistancesInterface.hpp" +#include "NeighboursInterface.hpp" +#include "TSAUtils/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** Contains information about a cyclic shift. Note that "moves" + * are not swaps; they are "half swaps". I.e., a move v1->v2 + * means that we pretend that v2 has no token on it, and see + * what would happen if we moved the token on v1 to v2, ignoring + * whatever token is on v2. 
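A step-by-step trace of the example discussed just below (illustrative): with tokens a, b, c on v0, v1, v2 and the cyclic shift v0->v1->v2->v0 as the goal,

    start:          v0:a  v1:b  v2:c
    swap (v1, v2):  v0:a  v1:c  v2:b
    swap (v0, v1):  v0:c  v1:a  v2:b

so two swaps along the path [v0, v1, v2] realise all three moves, and the edge v0-v2 is never touched.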
+ * It's important to realise that moves are impossible to do by themselves, + * if both vertices contain tokens; it is only SEQUENCES of moves + * which may sometimes be converted into swaps. + * For example, assuming that edges v0-v1 and v1-v2 exist, + * the length 3 move sequence v0->v1->v2->v0 + * may be enacted by 2 swaps (v0, v1) . (v1, v2). + * Notice that the edge v0-v2 does NOT have to exist. Also, this cyclic shift + * is still possible in 2 swaps if any 2 of the 3 edges v0-v1, v1-v2, v0-v2 + * exist. + */ +struct Cycle { + /** By how much would L (the sum of distances from current vertex + * to target vertex) decrease? Can be positive or negative. + * It has two different interpretations: + * for the first, for OPEN cycles, + * we simply IGNORE the token on v(N), the last vertex, + * and store the decrease for the partial cyclic shift + * v0->v1->v2->v3-> ... -> v(N), AS IF there were no token on v(N). + * + * For the second interpretation, once "attempt_to_close_cycles" + * has returned true, this switches meaning to the L-decrease for + * the FULL cycle, i.e. including the v(N)->v(0) decrease. + */ + int decrease; + + /** The abstract move sequence moves each vertex to the next in the list. + * When the cycle is closed, the final vertex moves back to the start. + * [v0,v1,v2,v3,...,vN] must be a genuine path (the edges must exist), + * BUT the edge vN -> v0 to close the cycle does NOT have to exist. + */ + std::vector<size_t> vertices; + + /** We need this to maintain paths without duplicate vertices. + * Maintaining a std::set of vertices for quick lookup would work, + * BUT actually "vertices" is always quite small, + * so just do a linear search. + * @param vertex A vertex + * @return whether that vertex already exists in "vertices". + */ + bool contains(size_t vertex) const; +}; + +typedef VectorListHybrid<Cycle> Cycles; + +/** Concerned only with growing cycles and closing them. + * For use in CyclesPartialTsa. We build up collections of cycles + * with information about what would happen if it were closed, + * i.e. the complete cycle were performed somehow, + * and also ensure when we grow cycles that the newly added vertex + * is not already present. + * + * Note that longer cycles need more swaps, so our heuristic is to prefer + * shorter cycles, if all else is equal. + * (In the best possible case, if every + * abstract token move v(i)->v(i+1) moved one token closer to home, + * then the total L-decrease would be V, for V vertices, but would need + * V-1 swaps to perform, for a "power" (L-decrease per swap) of V/(V-1), + * which is actually decreasing in V). + * [Of course it's only a heuristic, not necessarily optimal, because + * doing short-term worse moves now might allow better moves + * in the long term - always the problem with optimisation]. + */ +class CyclesGrowthManager { + public: + /** These control the behaviour; experimentation is needed + * to find the best values. + */ + struct Options { + size_t max_cycle_size = 6; + + /** The worst-case total number of cycles grows exponentially, + * e.g. the complete graph with n vertices has ~ 0.5 n^2 edges, + * but >> 2^n cycles. + * + * We avoid exponential time/space blowup by limiting the number + * of cycles under consideration; any more are just discarded. + */ + size_t max_number_of_cycles = 1000; + + /** Discard a partially built up cycle as soon as the L-decrease + * (decrease of total distances of vertices from their targets) + * drops below this.
+ * + * Larger values should lead to a more "aggressive", "greedy-like" + * algorithm, which MAY be better - more experimentation needed. + * + * This can even be negative, giving cycles the chance to be initially bad, + * but later turn good. + */ + int min_decrease_for_partial_path = 0; + + /** Similar to "min_decrease_for_partial_path", but expressed + * in terms of "power". Power is defined as (L-decrease)/(number of moves), + * which is always between -1 and +1 since each move (NOT a swap!) + * changes L by one of -1,0,+1. + * Express as a percentage to handle fractions. + * The partial cycle will be discarded unless BOTH criteria using + * min_decrease_for_partial_path AND this are satisfied. + */ + int min_power_percentage_for_partial_path = 0; + }; + + /** Access the options, to change if desired. */ + Options& get_options(); + + /** Simply returns the stored cycles. For an extra security check: + * unless you request otherwise (e.g., for debugging purposes), + * you can ONLY extract the cycles once they are + * converted into good candidates by "attempt_to_close_cycles". + * Note that some cycles may be repeated, e.g. [v0, v1, v2] and [v1, v2, v0] + * might both occur; further filtering is necessary. + * + * Of course [v2, v1, v0] would be a totally different cycle, + * as the direction is reversed. + * + * @param throw_if_cycles_are_not_candidates The intended use is to call this + * function only once candidates arise. If you call this function + * without having candidates, then it throws if this is set to true (the + * default value). But for testing/debugging, it is helpful to call this + * function just to inspect the cycles, and so this parameter should be set to + * false. + * @return The stored cycles. + */ + const Cycles& get_cycles( + bool throw_if_cycles_are_not_candidates = true) const; + + /** Start a new problem. The next function to call is + * "attempt_to_close_cycles". Of course, swaps are just cycles with 2 + * vertices. + * @param vertex_mapping Where does each vertex want to move? (I.e., it's + * a current vertex -> target vertex map). Can be partial, i.e. not every + * vertex has to have a token. + * @param distances Object to calculate distances between vertices + * @param neighbours Object to calculate vertices adjacent to a given vertex + * @return True if it found at least some good moves, false if it couldn't + * find ANY good moves (which must mean that all tokens are home). + * Recall that a move is only a "half swap". + */ + bool reset( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours); + + /** For each cycle, see what would happen if we performed the full cycle + * (i.e., "closed the cycle"). + * The current cycles are stored as paths [v0, v1, ..., vn], where the edges + * v(i) <-> v(i+1) exist, for 0 <= i < n. + * Even if the edge v(n)->v(0) does not exist, the cycle is POSSIBLE + * by "swapping along" the path [v0, v1, ..., vn]. The end result is a cyclic + * shift. If at least one cycle could be closed to create a viable candidate + * (giving a net decrease in L), return true and delete all cycles which are + * NOT candidates, and also fill in the L-decrease values for the CLOSED + * cycle. If NO cycle closures give a good result, do nothing and return + * false. + * @param vertex_mapping The desired (source vertex->target vertex) mapping, + * for the current locations of tokens on vertices. + * @param distances Object to calculate distances, used to calculate + * L-decreases. 
+ * @return True if at least one good closed cycle exists (i.e., giving net + * strict decrease of L). If so, all non-good cycles are deleted. If no good + * closed cycle exists, do nothing and return false. + */ + bool attempt_to_close_cycles( + const VertexMapping& vertex_mapping, DistancesInterface& distances); + + /** Record what happens when we try to GROW cycles (i.e., increase the length + * of each stored cycle by one, discarding all those which could not grow). + */ + struct GrowthResult { + /** If TRUE, there are no more cycles to consider; finish. */ + bool empty = false; + + /** If we're already at the length limit, delete all cycles. + * (There is no further use for them, so this is safest). + * However, this is not the only possible way for all cycles to be deleted. + * There might not be any other vertices in the graph to add; + * or they might all be bad cycles (i.e., not decreasing L by enough to + * keep them). + */ + bool hit_cycle_length_limit = false; + }; + + /** For each existing cycle, try all possible ways to extend it + * by one step from the last vertex. + * Keep all new cycles generated in this way with a good L decrease, + * and discard all others (including the original cycle). + * Thus, all cycles should have the same number of vertices, increasing + * by one each time this function is called (unless they are all deleted). + * @param vertex_mapping The current desired (source vertex -> target vertex) + * mapping. + * @param distances Object to calculate distances, used to calculate + * L-decreases. + * @param neighbours Object to calculate adjacent vertices to a given vertex. + * @return What happened when we tried to grow the cycles. + */ + GrowthResult attempt_to_grow( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours); + + private: + Cycles m_cycles; + Options m_options; + bool m_cycles_are_candidates = false; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.cpp b/tket/src/TokenSwapping/CyclesPartialTsa.cpp new file mode 100644 index 0000000000..04326442e8 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesPartialTsa.cpp @@ -0,0 +1,91 @@ +#include "CyclesPartialTsa.hpp" + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +CyclesPartialTsa::CyclesPartialTsa() { m_name = "Cycles"; } + +CyclesGrowthManager::Options& CyclesPartialTsa::growth_options() { + return m_growth_manager.get_options(); +} + +CyclesCandidateManager::Options& CyclesPartialTsa::candidate_options() { + return m_candidate_manager.get_options(); +} + +void CyclesPartialTsa::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + PathFinderInterface& path_finder) { + // We'll add the calculated swaps to the path finder at the end. + // THIS is the right place to do it, not the caller, because + // (as far as the caller knows) it's possible that PartialTSA objects + // reduce/reorder swaps, and so it would be invalid just to go back through + // the appended swaps. However, THIS class knows that no reordering or + // reduction occurs. 
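// (The loop below repeatedly calls single_iteration_partial_solution until
// an iteration adds no swaps; each iteration regrows cycles from scratch for
// the updated vertex mapping, so it terminates either with all tokens home
// or with no further good cycles found.)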
+ const size_t initial_swap_size = swaps.size(); + for (;;) { + const auto swap_size_before = swaps.size(); + single_iteration_partial_solution( + swaps, vertex_mapping, distances, neighbours); + const auto swap_size_after = swaps.size(); + TKET_ASSERT(swap_size_after >= swap_size_before); + if (swap_size_before == swap_size_after) { + break; + } + } + const size_t final_swap_size = swaps.size(); + TKET_ASSERT(initial_swap_size <= final_swap_size); + if (initial_swap_size == final_swap_size || + !path_finder.edge_registration_has_effect()) { + return; + } + // At least one swap was added. + const auto current_back_id_opt = swaps.back_id(); + TKET_ASSERT(current_back_id_opt); + auto current_id = current_back_id_opt.value(); + for (size_t remaining_swaps = final_swap_size - initial_swap_size;;) { + const auto& swap = swaps.at(current_id); + path_finder.register_edge(swap.first, swap.second); + --remaining_swaps; + if (remaining_swaps == 0) { + break; + } + const auto prev_id_opt = swaps.previous(current_id); + TKET_ASSERT(prev_id_opt); + current_id = prev_id_opt.value(); + } +} + +void CyclesPartialTsa::single_iteration_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours) { + if (!m_growth_manager.reset(vertex_mapping, distances, neighbours)) { + // no solutions. + return; + } + + for (auto infinite_loop_guard = m_growth_manager.get_options().max_cycle_size; + infinite_loop_guard > 0; --infinite_loop_guard) { + if (m_growth_manager.attempt_to_close_cycles(vertex_mapping, distances)) { + // Some solutions found. + m_candidate_manager.append_partial_solution( + m_growth_manager, swaps, vertex_mapping); + return; + } + // No solutions so far, so grow... + const auto growth_result = + m_growth_manager.attempt_to_grow(vertex_mapping, distances, neighbours); + if (growth_result.empty || growth_result.hit_cycle_length_limit) { + return; + } + } + TKET_ASSERT(!"growth_manager termination"); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.hpp b/tket/src/TokenSwapping/CyclesPartialTsa.hpp new file mode 100644 index 0000000000..7f8dfe7421 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesPartialTsa.hpp @@ -0,0 +1,105 @@ +#ifndef _TKET_TokenSwapping_CyclesPartialTsa_H_ +#define _TKET_TokenSwapping_CyclesPartialTsa_H_ + +#include "CyclesCandidateManager.hpp" +#include "PartialTsaInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** A partial TSA (token swapping algorithm), similar to the cycle-finding + * algorithm as described in the 2016 paper "Approximation and Hardness of + * Token Swapping" by T.Miltzow and others: + * + * https://arxiv.org/abs/1602.05150 + * + * However, our algorithm differs from the paper in several important ways: + * (1) We also accept partial mappings, i.e. there might not be a token on + * every vertex. (2) It is only a partial TSA, not a full TSA (it may give up + * early). Thus, a full end-to-end solution must combine this with another TSA). + * (3) It does not detect long cycles. (4) It never returns "unhappy + * swaps", and is strictly monotonic: L, the sum of distances of a vertex to its + * target, either strictly decreases, or stays the same and no swaps are + * performed. (However, within each cycle, it is possible to have bad swaps + * which don't decrease L much, or even increase L, as long as the + * overall result is a decrease in L). (5) The closing edge of a cycle is not + * required to exist in the graph. 
+ * + * Thus, neither this nor the algorithm in the paper is a generalisation of or + * necessarily better/worse than the other. + * + * One of the ideas in the 2016 paper is to detect good cycles (cyclic shifts) + * v0->v1-> ... ->vn->v0, by searching for cycles in a directed graph. + * It is guaranteed to find cycles if they exist, no matter the length. So, by + * (3), it is better than ours in this sense. However, we don't need the full + * cycle to exist, by (5), since we swap along the path [v0,v1,v2,...,vn]. + * Hence, the paper algorithm is worse than ours in this respect. Regarding (2) + * and (4), the paper is better than ours because it always completes. + */ +class CyclesPartialTsa : public PartialTsaInterface { + public: + CyclesPartialTsa(); + + /** Access the options of the inner stored object, to change behaviour. + * TODO: do many experiments, to find the best possible parameters. + * Then, set them as defaults and possibly remove this function. + * @return Options controlling behaviour of cycle growing. + */ + CyclesGrowthManager::Options& growth_options(); + + /** Access the options of the inner stored object, to change behaviour. + * TODO: do many experiments, to find the best possible parameters. + * @return Options controlling the filtering and selection of candidate + * cycles to convert to swaps. + */ + CyclesCandidateManager::Options& candidate_options(); + + /** Calculate a solution to improve the current token configuarion, + * add the swaps to the list, and carry out the swaps on "vertex_mapping". + * We don't need a path finder because the cycles are built up one vertex + * at a time, so we only need distances and neighbours. + * There is no point in calling this multiple times; + * it will continue until EITHER all tokens are home, OR it gives up. + * @param swaps The list of swaps to add to. + * @param vertex_mapping The current state, giving vertex->target mappings. + * Will be updated if any new swaps are performed. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate the neighbours of a vertex. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. (Of course, paths might not be unique if the graph + * is not a tree, so it is an important part of the heuristics that + * the returned paths are fairly "consistent", i.e. "nearby" vertex pairs + * should return "nearby" paths). + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + PathFinderInterface& path_finder) override; + + private: + /** Stores cycles, and controls the growth and discarding of cycles. + * We grow the cycles one vertex at a time until we reach a good cycle + * which is worth turning into swaps. + * If we never find a good cycle then we give up without returning a + * solution. + */ + CyclesGrowthManager m_growth_manager; + + /** Controls the final selection of cycles to perform. Once we've found + * some good cycles, we may not be able to perform all of them + * (because they might not be disjoint, so interfere with each other). + * We may not even want to perform them all, depending upon the options. + */ + CyclesCandidateManager m_candidate_manager; + + /** "append_partial_solution" simply loops, calling this repeatedly until + * it gives up, or all tokens are home. 
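+   * Each call builds up cycles from scratch: either some cycles can be
+   * closed, in which case swaps are appended, or growth fails and
+   * nothing is appended.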
+ */ + void single_iteration_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp new file mode 100644 index 0000000000..6de62fe698 --- /dev/null +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp @@ -0,0 +1,53 @@ +#include "CyclicShiftCostEstimate.hpp" + +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +CyclicShiftCostEstimate::CyclicShiftCostEstimate( + const std::vector& vertices, DistancesInterface& distances) { + TKET_ASSERT(vertices.size() >= 2); + // We first work out the total distance v(0)->v(1)-> .. -> v(n) -> v(0). + // If we snip out v(i)->v(i+1), the remaining path tells us how many swaps + // we need. So, we must snip out the LARGEST distance(v(i), v(i+1)). + size_t largest_distance = distances(vertices.back(), vertices[0]); + size_t total_distance = largest_distance; + + if (vertices.size() == 2) { + start_v_index = 0; + } else { + // The value i such that distance(v(i), v(i+1)) is largest. + size_t v_index_with_largest_distance = vertices.size() - 1; + for (size_t ii = 0; ii + 1 < vertices.size(); ++ii) { + const auto distance_i = distances(vertices[ii], vertices[ii + 1]); + TKET_ASSERT(distance_i > 0); + total_distance += distance_i; + if (distance_i < largest_distance) { + largest_distance = distance_i; + v_index_with_largest_distance = ii; + } + } + // Now, remove the largest distance again... + total_distance -= largest_distance; + // We've snipped out (v[i], v[i+1]), so logically we start from v[i+1]. + start_v_index = (v_index_with_largest_distance + 1) % vertices.size(); + } + // To enact an abstract cyclic shift [a,b,c,d], + // choose abstract swaps (cd), (bc), (ab). + // The number of CONCRETE swaps to enact an abstract swap (xy) is + // 2.dist(x,y) - 1. + // e.g., to swap x,y along the path [x,u,v,y], dist(x,y)=3, + // we use 5 concrete vertex swaps (xu), (uv), (vy), (uv), (xu). + // What we've currently stored is the sum of dist(x,y), + // and clearly (sum)(-1) = -(Number of terms in the sum). + estimated_concrete_swaps = 2 * total_distance; + TKET_ASSERT(estimated_concrete_swaps > vertices.size() - 1); + estimated_concrete_swaps -= vertices.size() - 1; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp new file mode 100644 index 0000000000..031d6901d8 --- /dev/null +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp @@ -0,0 +1,64 @@ +#ifndef _TKET_TokenSwapping_CyclicShiftCostEstimate_H_ +#define _TKET_TokenSwapping_CyclicShiftCostEstimate_H_ + +#include + +#include "DistancesInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** Used in the TrivialTSA class (NOT in CyclesPartialTsa!) + * Given a desired abstract cyclic shift on [v0, v1, v2, ..., vn], + * i.e. abstract moves v(0)->v(1)->v(2)-> ... ->v(n)->v(0), + * [meaning that v(i), v(i+1) need not actually be adjacent in the graph, + * so we must decide how to represent the desired moves as actual swaps], + * there are n+1 possible obvious ways to enact it + * (and of course, maybe some "nonobvious" ways. + * Finding a good way is, of course, a special case of the Token Swapping + * problem which we're trying to solve!) 
+ * (Of course, also maybe more than n+1 "obvious" ways because paths + * from v[i] to v[i+1] might not be unique). + * + * It's important that the overall effect of the complete cycle + * doesn't move any OTHER tokens, so that we can GUARANTEE that + * the final TrivialTSA solution really does terminate in all cases. + * + * We can "swap along" the path v(i), v(i+1), ..., v(i+n) for any 0 <= i <= n + * (regarding the v indices as wrapping around cyclicly, + * i.e. reducing (i+n) mod (n+1).) + * + * This finds a choice giving the smallest number of concrete swaps, + * assuming no additional swap optimisation, + * and disregarding the tokens on the vertices. + * + * It may not be the genuinely best solution + * because (1) swap sequences can often be optimised; + * (2) some of the swaps may be empty, and hence removable. + * But finding a truly optimal solution, taking these into account, + * is probably about as hard as the general token swapping problem. + */ +struct CyclicShiftCostEstimate { + /** A simple estimate of how many swaps will be needed. */ + size_t estimated_concrete_swaps = 0; + + /** If the stored vertices are v[0], v[1], ..., v[n], + * this is the value of i such that swapping along the abstract path + * v[i], v[i+1], ..., v[i+n] gives the smallest number of swaps. + * (Remembering that each abstract move is v[j] -> v[j+1]). + */ + size_t start_v_index = std::numeric_limits::max(); + + /** Calculate the data upon construction. + * @param vertices The list of vertices, in order, for a cyclic shift. + * Must have size >= 2. + * @param distances An object to calculate distances (we don't need to know + * WHICH path between vertices will be used, at this stage). + */ + CyclicShiftCostEstimate( + const std::vector& vertices, DistancesInterface& distances); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp new file mode 100644 index 0000000000..b5db7c9f84 --- /dev/null +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -0,0 +1,78 @@ +#include "DistancesFromArchitecture.hpp" + +#include +#include + +namespace tket { +namespace tsa_internal { + +DistancesFromArchitecture::DistancesFromArchitecture( + const ArchitectureMapping& arch_mapping) + : m_arch_mapping(arch_mapping) {} + +void DistancesFromArchitecture::register_shortest_path( + const std::vector& path) { + // To avoid quadratic growth for really long paths, + // just do various slices. 
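+  // Roughly: short paths (size <= 5) are registered in full; medium paths
+  // (size <= 10) as two halves plus the joining edge; longer paths only
+  // near the two endpoints, plus a small window around the middle when
+  // the path has size >= 15.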
+ if (path.size() <= 5) { + register_shortest_path_with_limits(path, 0, path.size()); + return; + } + const size_t middle = path.size() / 2; + if (path.size() <= 10) { + register_shortest_path_with_limits(path, 0, middle); + register_shortest_path_with_limits(path, middle, path.size()); + register_edge(path[middle - 1], path[middle]); + return; + } + register_shortest_path_with_limits(path, 0, 5); + register_shortest_path_with_limits(path, path.size() - 5, path.size()); + if (path.size() >= 15) { + register_shortest_path_with_limits(path, middle - 2, middle + 3); + } +} + +void DistancesFromArchitecture::register_shortest_path_with_limits( + const std::vector& path, size_t begin, size_t end) { + for (size_t ii = begin; ii < end; ++ii) { + for (size_t jj = ii + 1; jj < end; ++jj) { + m_cached_distances[get_swap(path[ii], path[jj])] = jj - ii; + } + } +} + +void DistancesFromArchitecture::register_edge(size_t vertex1, size_t vertex2) { + m_cached_distances[get_swap(vertex1, vertex2)] = 1; +} + +size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { + if (vertex1 == vertex2) { + return 0; + } + // Automatically set to zero if it doesn't exist yet. + auto& distance_entry = m_cached_distances[get_swap(vertex1, vertex2)]; + if (distance_entry == 0) { + const auto& arch = m_arch_mapping.get_architecture(); + distance_entry = arch.get_distance( + m_arch_mapping.get_node(vertex1), m_arch_mapping.get_node(vertex2)); + + // This message should no longer be triggered for disconnected + // architectures, since get_distance now should throw if v1, v2 are in + // different connected components. However, leave the check in, in case some + // other bizarre error causes distance zero to be returned. + if (distance_entry == 0) { + std::stringstream ss; + ss << "DistancesFromArchitecture: architecture has " << arch.n_uids() + << " vertices, " << arch.n_connections() + << " edges; returned diameter " << arch.get_diameter() << ", but d(" + << vertex1 << "," << vertex2 + << ")=0. " + "Is the graph connected?"; + throw std::runtime_error(ss.str()); + } + } + return distance_entry; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.hpp b/tket/src/TokenSwapping/DistancesFromArchitecture.hpp new file mode 100644 index 0000000000..a6c2c33ed4 --- /dev/null +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.hpp @@ -0,0 +1,76 @@ +#ifndef _TKET_TokenSwapping_DistancesFromArchitecture_H_ +#define _TKET_TokenSwapping_DistancesFromArchitecture_H_ + +#include "ArchitectureMapping.hpp" +#include "DistancesInterface.hpp" +#include "TSAUtils/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** Directly get distances from an architecture object, + * but evaluated lazily. + */ +class DistancesFromArchitecture : public DistancesInterface { + public: + /** The ArchitectureMapping object already handles the Node <-> vertex size_t + * conversion. + * @param arch_mapping Object containing a reference to an Architecture, + * which has decided upon Node <-> vertex size_t conversions. + */ + explicit DistancesFromArchitecture(const ArchitectureMapping& arch_mapping); + + /** Get the distance from v1 to v2. Throws if distinct vertices return + * distance 0, which probably means a disconnected graph. + * @param vertex1 First vertex + * @param vertex2 Second vertex + * @return distance from v1 to v2 within the Architecture graph, throwing if + * they are disconnected (so the distance is +infinity). 
+ */ + virtual size_t operator()(size_t vertex1, size_t vertex2) override; + + /** May save computation time later; by some method, the caller + * has determined a path from v1 to v2, and hence all along the path + * we know the distance between any two points. + * However, avoids quadratic time blowup by discarding some information + * for long paths. + * @param path A sequence [v0,v1, v2, ..., vn] of vertices, KNOWN to be a + * shortest path from v0 to vn. The caller must not call this without being + * SURE that it really is a shortest path, or incorrect results may occur. + */ + virtual void register_shortest_path(const std::vector& path) override; + + /** The caller has determined that v1, v2 are adjacent, and therefore + * the distance from v1 to v2 equals one. Store this. + * @param vertex1 First vertex + * @param vertex2 Second vertex + */ + virtual void register_edge(size_t vertex1, size_t vertex2) override; + + private: + /** Reference to the original object passed into the constructor; + * the caller must ensure that it remains valid and unchanged. + */ + const ArchitectureMapping& m_arch_mapping; + + /** The key is the vertex pair (v1, v2), but always sorted with v1 m_cached_distances; + + /** The main register_shortest_path wraps around this; we want to avoid + * quadratic timings growth by cutting off long paths. + * This stores the quadratic number of distances between all vertex pairs + * within the given subpath. + * @param path A sequence [v0,v1, v2, ..., vn] of vertices, + * KNOWN to be a shortest path from v0 to vn. + * @param begin The first index in path to use. + * @param end Like end(), an index one past the last index in path to use. + */ + void register_shortest_path_with_limits( + const std::vector& path, size_t begin, size_t end); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/DistancesInterface.cpp b/tket/src/TokenSwapping/DistancesInterface.cpp new file mode 100644 index 0000000000..ab19564597 --- /dev/null +++ b/tket/src/TokenSwapping/DistancesInterface.cpp @@ -0,0 +1,25 @@ +#include "DistancesInterface.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +void DistancesInterface::register_shortest_path( + const vector& /*path*/) {} + +void DistancesInterface::register_neighbours( + size_t vertex, const vector& neighbours) { + for (size_t nv : neighbours) { + register_edge(vertex, nv); + } +} + +void DistancesInterface::register_edge(size_t /*vertex1*/, size_t /*vertex2*/) { +} + +DistancesInterface::~DistancesInterface() {} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/DistancesInterface.hpp b/tket/src/TokenSwapping/DistancesInterface.hpp new file mode 100644 index 0000000000..efff6c7717 --- /dev/null +++ b/tket/src/TokenSwapping/DistancesInterface.hpp @@ -0,0 +1,56 @@ +#ifndef _TKET_TokenSwapping_DistancesInterface_H_ +#define _TKET_TokenSwapping_DistancesInterface_H_ + +#include +#include + +namespace tket { +namespace tsa_internal { + +/** What is the distance between any two vertices on a graph? + * To save time and cope with larger, sparse graphs, it may + * calculate distances only when required. + */ +class DistancesInterface { + public: + /** Not const because there might be caching, dynamic stuff going on. + * Find the distance between v1,v2. + * @param vertex1 First vertex + * @param vertex2 Second vertex + * @return distance from v1 to v2 within the graph. 
+ */ + virtual size_t operator()(size_t vertex1, size_t vertex2) = 0; + + /** If you KNOW a path from v1 to v2 which is shortest, then + * extra information about distances can be deduced from subpaths + * (each subpath must also be a shortest path: otherwise, the whole path + * would not be of minimum length). + * Does nothing unless overridden. + * @param path A sequence [v0,v1, v2, ..., vn] of vertices, KNOWN to be a + * shortest path from v0 to vn. The caller must not call this without being + * SURE that it really is a shortest path, or incorrect results may occur. + */ + virtual void register_shortest_path(const std::vector& path); + + /** If you know the neighbours of a vertex, you can tell this class + * and it MIGHT choose to cache the distances. + * Simply calls register_neighbours(v1, v2) repeatedly, unless overridden. + * @param vertex A vertex. + * @param neighbours A list of vertices adjacent to the given vertex. + */ + virtual void register_neighbours( + size_t vertex, const std::vector& neighbours); + + /** Does nothing unless overridden. Stores the fact that v1,v2 are adjacent, + * to save later recalculation. + * @param vertex1 First vertex + * @param vertex2 Second vertex + */ + virtual void register_edge(size_t vertex1, size_t vertex2); + + virtual ~DistancesInterface(); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.cpp b/tket/src/TokenSwapping/DynamicTokenTracker.cpp new file mode 100644 index 0000000000..bf79857095 --- /dev/null +++ b/tket/src/TokenSwapping/DynamicTokenTracker.cpp @@ -0,0 +1,65 @@ +#include "DynamicTokenTracker.hpp" + +; + +namespace tket { +namespace tsa_internal { + +void DynamicTokenTracker::clear() { m_vertex_to_token.clear(); } + +void DynamicTokenTracker::reset() { + for (auto& entry : m_vertex_to_token) { + entry.second = entry.first; + } +} + +Swap DynamicTokenTracker::do_vertex_swap(const Swap& swap) { + const auto v1 = swap.first; + const auto v2 = swap.second; + const auto t1 = get_token_at_vertex(v1); + const auto t2 = get_token_at_vertex(v2); + m_vertex_to_token[v1] = t2; + m_vertex_to_token[v2] = t1; + return get_swap(t1, t2); +} + +bool DynamicTokenTracker::equal_vertex_permutation_from_swaps( + const DynamicTokenTracker& other) const { + return tokens_here_have_equal_locations_in_the_other_object(other) && + other.tokens_here_have_equal_locations_in_the_other_object(*this); +} + +bool DynamicTokenTracker::tokens_here_have_equal_locations_in_the_other_object( + const DynamicTokenTracker& other) const { + for (const auto& vertex_token_pair : m_vertex_to_token) { + const auto vertex = vertex_token_pair.first; + const auto token = vertex_token_pair.second; + const auto citer = other.m_vertex_to_token.find(vertex); + + if (citer == other.m_vertex_to_token.cend()) { + // If it's unmentioned by the other, then the vertex MUST be fixed + // to give the same permutation. + // Otherwise, the other object doesn't know where the token moved to. 
+ if (vertex != token) { + return false; + } + } else { + if (token != citer->second) { + return false; + } + } + } + return true; +} + +size_t DynamicTokenTracker::get_token_at_vertex(size_t vertex) { + const auto iter = m_vertex_to_token.find(vertex); + if (iter == m_vertex_to_token.end()) { + m_vertex_to_token[vertex] = vertex; + return vertex; + } + return iter->second; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.hpp b/tket/src/TokenSwapping/DynamicTokenTracker.hpp new file mode 100644 index 0000000000..b53386ad99 --- /dev/null +++ b/tket/src/TokenSwapping/DynamicTokenTracker.hpp @@ -0,0 +1,82 @@ +#ifndef _TKET_TokenSwapping_DynamicTokenTracker_H_ +#define _TKET_TokenSwapping_DynamicTokenTracker_H_ + +#include "TSAUtils/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** Tracks which token is on which vertex; + * every vertex has a different token. + * Only intended for a specific optimisation pass in SwapListOptimiser. + * Does not require contiguous vertex numbers or token numbers. + * Does not require to be initialised with all vertices at the start. + * Thus, operations take time O(log N), with N being the current number + * of vertices seen, NOT the total number of vertices. + * The tokens are "artificial", i.e. nothing to do with an actual + * Token Swapping problem; they are there to track full vertex mappings + * induced by a sequence of swaps. + */ +class DynamicTokenTracker { + public: + /** Call before starting a new sequence of swaps. */ + void clear(); + + /** Logically the same effect as clear, but doesn't actually clear. + * Instead, fills existing map entries. + * Should be a bit faster for many reuses than clearing every time, + * because it will need fewer tree rebalances inside the maps. + */ + void reset(); + + /** Swap the tokens at the given vertices, + * and return the TOKENS that were swapped. + * Note that every vertex is assumed initially to have a token + * with the same vertex value (i.e., the token equals the INITIAL + * vertex). Thus we don't need to know in advance which vertices + * exist, they will be lazily stored only when needed. + * @param swap The two vertices to be swapped. + * @return The two TOKENS on those vertices which were swapped. + */ + Swap do_vertex_swap(const Swap& swap); + + /** Checks if the swap sequence performed on the other tracker object + * results in the same vertex permutation. + * This is NOT the same as just checking equality of data, + * because a vertex could be unmentioned in our sequence, + * and thus not appear anywhere internally; but in the other sequence + * it could appear, but end up back where it started. + * @param other Another DynamicTokenTracker object + * @return Whether the swaps performed on this object and the other object + * resulted in the same vertex permutation on the whole graph + * (remembering that some vertices may be mentioned in one object + * but not the other). + */ + bool equal_vertex_permutation_from_swaps( + const DynamicTokenTracker& other) const; + + private: + VertexMapping m_vertex_to_token; + + /** Get the token, but if it doesn't already exist, create it. + * @param vertex The vertex + * @return The token at that vertex, or equal to the vertex number + * IF it doesn't already exist. + */ + size_t get_token_at_vertex(size_t vertex); + + /** Does every token mentioned in this object lie at the same vertex in + * the other object? 
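+   * (For example, if this object swapped the same pair of vertices twice
+   * and the other object recorded no swaps at all, both represent the
+   * identity permutation, so they compare equal here even though their
+   * internal maps differ.)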
+ * @param other Another DynamicTokenTracker object + * @return Whether all tokens mentioned by this object have + * the same location according to the other object (remembering + * that unmentioned vertices are implicitly assumed to have equal tokens + * lying on them initially). + */ + bool tokens_here_have_equal_locations_in_the_other_object( + const DynamicTokenTracker& other) const; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/HybridTsa00.cpp b/tket/src/TokenSwapping/HybridTsa00.cpp new file mode 100644 index 0000000000..45992b8367 --- /dev/null +++ b/tket/src/TokenSwapping/HybridTsa00.cpp @@ -0,0 +1,45 @@ +#include "HybridTsa00.hpp" + +#include "TSAUtils/DistanceFunctions.hpp" +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +HybridTsa00::HybridTsa00() { + m_name = "HybridTSA_00"; + m_trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); +} + +CyclesPartialTsa& HybridTsa00::get_cycles_tsa_for_testing() { + return m_cycles_tsa; +} + +TrivialTSA& HybridTsa00::get_trivial_tsa_for_testing() { return m_trivial_tsa; } + +void HybridTsa00::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + PathFinderInterface& path_finder) { + const auto initial_L = get_total_home_distances(vertex_mapping, distances); + for (size_t counter = initial_L + 1; counter > 0; --counter) { + const auto swaps_before = swaps.size(); + m_cycles_tsa.append_partial_solution( + swaps, vertex_mapping, distances, neighbours, path_finder); + + m_trivial_tsa.append_partial_solution( + swaps, vertex_mapping, distances, neighbours, path_finder); + + if (swaps_before == swaps.size()) { + TKET_ASSERT(all_tokens_home(vertex_mapping)); + return; + } + } + TKET_ASSERT(!"hybrid TSA termination"); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/HybridTsa00.hpp b/tket/src/TokenSwapping/HybridTsa00.hpp new file mode 100644 index 0000000000..11b911bc26 --- /dev/null +++ b/tket/src/TokenSwapping/HybridTsa00.hpp @@ -0,0 +1,51 @@ +#ifndef _TKET_TokenSwapping_HybridTsa00_H_ +#define _TKET_TokenSwapping_HybridTsa00_H_ + +#include "CyclesPartialTsa.hpp" +#include "TrivialTSA.hpp" + +namespace tket { +namespace tsa_internal { + +/** A full end-to-end TSA, combining the partial cycles TSA + * (hopefully good) with the full "trivial" TSA (not so good). + */ +class HybridTsa00 : public PartialTsaInterface { + public: + HybridTsa00(); + + /** For the current token configuration, calculate a sequence of swaps + * to move all tokens home, and append them to the given list. + * As this is a full TSA, it guarantees to find a solution. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + PathFinderInterface& path_finder) override; + + /** Only for experiments; will be removed again + * once the best parameter combinations are found! + * @return A reference to the internal TSA object, to change parameters. 
+ */ + CyclesPartialTsa& get_cycles_tsa_for_testing(); + + /** Temporary; only for experiments! + * @return A reference to the internal TSA object, to change parameters. + */ + TrivialTSA& get_trivial_tsa_for_testing(); + + private: + CyclesPartialTsa m_cycles_tsa; + TrivialTSA m_trivial_tsa; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp new file mode 100644 index 0000000000..d52b33e586 --- /dev/null +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -0,0 +1,57 @@ +#include "NeighboursFromArchitecture.hpp" + +#include +#include +#include + +; + +namespace tket { +namespace tsa_internal { + +NeighboursFromArchitecture::NeighboursFromArchitecture( + const ArchitectureMapping& arch_mapping) + : m_arch_mapping(arch_mapping) {} + +const std::vector& NeighboursFromArchitecture::operator()( + size_t vertex) { + const auto num_vertices = m_arch_mapping.number_of_vertices(); + if (vertex >= num_vertices) { + std::stringstream ss; + ss << "get_neighbours: invalid vertex " << vertex << " (only have " + << num_vertices << " vertices)"; + throw std::runtime_error(ss.str()); + } + auto& neighbours = m_cached_neighbours[vertex]; + if (!neighbours.empty()) { + // Already cached. + return neighbours; + } + + // OK, if a vertex is isolated (has no neighbours) then this is wasteful; + // however this case should almost never occur in practice. + + const auto& source_node = m_arch_mapping.get_node(vertex); + const auto neighbour_nodes = + m_arch_mapping.get_architecture().get_neighbour_uids(source_node); + + neighbours.reserve(neighbour_nodes.size()); + + for (const Node& node : neighbour_nodes) { + const auto neighbour_vertex = m_arch_mapping.get_vertex(node); + if (neighbour_vertex == vertex) { + std::stringstream ss; + ss << "get_neighbours: vertex " << vertex << " for node " << node.repr() + << " has " << neighbour_nodes.size() + << " neighbours, and lists itself as a neighbour (loops not " + "allowed)"; + throw std::runtime_error(ss.str()); + } + neighbours.push_back(neighbour_vertex); + } + std::sort(neighbours.begin(), neighbours.end()); + return neighbours; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp new file mode 100644 index 0000000000..7cf6b16550 --- /dev/null +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp @@ -0,0 +1,39 @@ +#ifndef _TKET_TokenSwapping_NeighboursFromArchitecture_H_ +#define _TKET_TokenSwapping_NeighboursFromArchitecture_H_ + +#include "ArchitectureMapping.hpp" +#include "NeighboursInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** Stores and returns upon request the adjacent vertices to a given vertex + * on a graph, using an underlying Architecture object. + */ +class NeighboursFromArchitecture : public NeighboursInterface { + public: + /** The objects must remain valid AND unchanged + * for the lifetime of this object. + * @param arch_mapping An object which contains a reference to an + * Architecture object internally, and handles Node -> vertex size_t + * conversions. + */ + explicit NeighboursFromArchitecture(const ArchitectureMapping& arch_mapping); + + /** For extra convenience, the list of neighbours is always sorted + * in increasing order (so you can do binary search, etc.) + * @param vertex A vertex. 
+ * @return A sorted list of all adjacent vertices, stored internally. + */ + virtual const std::vector& operator()(size_t vertex) override; + + private: + const ArchitectureMapping& m_arch_mapping; + + /** The key is the vertex, the value is the list of neighbours. */ + std::map> m_cached_neighbours; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/NeighboursInterface.cpp b/tket/src/TokenSwapping/NeighboursInterface.cpp new file mode 100644 index 0000000000..ad91f163c8 --- /dev/null +++ b/tket/src/TokenSwapping/NeighboursInterface.cpp @@ -0,0 +1,15 @@ +#include "NeighboursInterface.hpp" + +#include "Utils/Exceptions.hpp" + +namespace tket { +namespace tsa_internal { + +const std::vector& NeighboursInterface::operator()(size_t) { + throw NotImplemented("NeighboursInterface::get_neighbours: not implemented"); +} + +NeighboursInterface::~NeighboursInterface() {} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/NeighboursInterface.hpp b/tket/src/TokenSwapping/NeighboursInterface.hpp new file mode 100644 index 0000000000..68fb2335ed --- /dev/null +++ b/tket/src/TokenSwapping/NeighboursInterface.hpp @@ -0,0 +1,34 @@ +#ifndef _TKET_TokenSwapping_NeighboursInterface_H_ +#define _TKET_TokenSwapping_NeighboursInterface_H_ + +#include +#include + +namespace tket { +namespace tsa_internal { + +/** What are the adjacent vertices to a given vertex on a graph? + * For larger, sparse graphs, it might + * calculate and store neighbours only when required. + */ +class NeighboursInterface { + public: + /** Returns the neighbours of the given vertex. + * The vector of neighbours is required to be stored internally. + * However, no guarantee that the reference will remain valid + * once another function call occurs. + * By default, throws (not implemented). + * (It's useful to be able to create a "null" object like this, + * because some algorithms don't actually need a neighbours object, + * but others do). + * @param vertex A vertex. + * @return A sorted list of all adjacent vertices, stored internally. + */ + virtual const std::vector& operator()(size_t vertex); + + virtual ~NeighboursInterface(); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/PartialTsaInterface.cpp b/tket/src/TokenSwapping/PartialTsaInterface.cpp new file mode 100644 index 0000000000..90a3320212 --- /dev/null +++ b/tket/src/TokenSwapping/PartialTsaInterface.cpp @@ -0,0 +1,11 @@ +#include "PartialTsaInterface.hpp" + +#include "Utils/Exceptions.hpp" + +namespace tket { +namespace tsa_internal { + +const std::string& PartialTsaInterface::name() const { return m_name; } + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/PartialTsaInterface.hpp b/tket/src/TokenSwapping/PartialTsaInterface.hpp new file mode 100644 index 0000000000..61abddb57f --- /dev/null +++ b/tket/src/TokenSwapping/PartialTsaInterface.hpp @@ -0,0 +1,56 @@ +#ifndef _TKET_TokenSwapping_PartialTsaInterface_H_ +#define _TKET_TokenSwapping_PartialTsaInterface_H_ + +#include "DistancesInterface.hpp" +#include "NeighboursInterface.hpp" +#include "PathFinderInterface.hpp" +#include "TSAUtils/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** TSA stands for Token Swapping Algorithm. + * A "partial TSA" is allowed to give up (not calculate any swaps), + * even when the tokens are not all home. 
+ * The hope is that different partial TSAs can be combined to give + * a good full TSA (i.e., one which always finds a complete solution). + */ +class PartialTsaInterface { + public: + /** The algorithm is allowed to fail (not calculate any swaps), + * but when it DOES return swaps, it is required to decrease L + * (the sum of the distances of each vertex containing a token + * from its target vertex). + * Thus progress is always nonnegative. + * Of course, a complete TSA is a special case. + * @param swaps The list of swaps to append to (does not clear first). + * @param vertex_mapping The current desired mapping. Each key is the + * current vertex where a token is; its value is the target vertex + * the token wants to reach. Usually, will be updated upon return to be the + * new configuration after performing the swaps. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. (Of course, paths might not be unique if the graph + * is not a tree, so it is an important part of the heuristics that + * the returned paths are fairly "consistent", i.e. "nearby" vertex pairs + * should return "nearby" paths). + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + PathFinderInterface& path_finder) = 0; + + /** For debugging purposes, every TSA object has a name. + * @return The name of this object (not necessarily unique). + */ + const std::string& name() const; + + protected: + std::string m_name; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/PathFinderInterface.cpp b/tket/src/TokenSwapping/PathFinderInterface.cpp new file mode 100644 index 0000000000..ac647f1f38 --- /dev/null +++ b/tket/src/TokenSwapping/PathFinderInterface.cpp @@ -0,0 +1,27 @@ +#include "PathFinderInterface.hpp" + +#include "Utils/Exceptions.hpp" + +namespace tket { +namespace tsa_internal { + +PathFinderInterface::PathFinderInterface() : m_name("Empty") {} + +PathFinderInterface::~PathFinderInterface() {} + +const std::vector& PathFinderInterface::operator()( + size_t /*vertex1*/, size_t /*vertex2*/) { + throw NotImplemented("PathFinderInterface: get path"); +} + +const std::string& PathFinderInterface::name() const { return m_name; } + +void PathFinderInterface::reset() {} + +void PathFinderInterface::register_edge( + size_t /*vertex1*/, size_t /*vertex2*/) {} + +bool PathFinderInterface::edge_registration_has_effect() const { return false; } + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/PathFinderInterface.hpp b/tket/src/TokenSwapping/PathFinderInterface.hpp new file mode 100644 index 0000000000..b0a3200642 --- /dev/null +++ b/tket/src/TokenSwapping/PathFinderInterface.hpp @@ -0,0 +1,74 @@ +#ifndef _TKET_TokenSwapping_PathFinderInterface_H_ +#define _TKET_TokenSwapping_PathFinderInterface_H_ + +#include +#include + +namespace tket { +namespace tsa_internal { + +/** What is SOME shortest path between vertices? + * This might involve an arbitrary choice, + * because some paths will not be unique if the graph is not a tree. + * For algorithms, we might need to choose in a vaguely consistent way, + * and use a random number generator. 
+ */ +class PathFinderInterface { + public: + PathFinderInterface(); + + /** By default, simply throws (not implemented). + * Returns a shortest path from v1 to v2, including v1 at the start + * and v2 at the end. This should usually return the same result for + * (v1, v2) each time it is called, but may change slightly over time. + * Although the path is stored internally, there's no guarantee + * that the reference will remain valid once another function call occurs. + * There's no guarantee that the path for (v1, v2) will be the reverse of + * the path for (v2, v1). + * Could take time O(length of path), if it is built up anew each time. + * @param vertex1 First vertex v1. + * @param vertex2 Second vertex v2. + * @return A list of vertices, starting with v1 and ending with v2, + * giving a shortest path from v1 to v2 (not unique, maybe not constant + * over time, and maybe not a valid reference after any other call). + */ + virtual const std::vector& operator()(size_t vertex1, size_t vertex2); + + virtual ~PathFinderInterface(); + + /** Some path finders use randomness; if so, override this to reset + * the source of randomness to some default seed + * to ensure reproducibility. By default, does nothing. + */ + virtual void reset(); + + /** If some other algorithm has made use of an edge v1-v2, + * without going through this path finder object, + * call this function to inform this object. + * (E.g., some classes remember which previous operator() calls were made, + * and use them to decide future paths when there is a nonunique choice). + * By default, does nothing. + * @param vertex1 First vertex v1. + * @param vertex2 Second vertex v2. + */ + virtual void register_edge(size_t vertex1, size_t vertex2); + + /** For convenience, if "register_edge" does nothing, return false so that the + * caller knows and doesn't waste time repeatedly calling "register_edge". + * @return True if the function "register_edge" has been overridden to do + * something, false if the function does nothing + */ + virtual bool edge_registration_has_effect() const; + + /** For debugging purposes, every object has a name. + * @return The name of the object. + */ + const std::string& name() const; + + protected: + std::string m_name; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/RNG.cpp b/tket/src/TokenSwapping/RNG.cpp new file mode 100644 index 0000000000..ab2c48e70d --- /dev/null +++ b/tket/src/TokenSwapping/RNG.cpp @@ -0,0 +1,162 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "RNG.hpp" + +using std::vector; + +namespace tket { + +size_t RNG::get_size_t(size_t max_value) { + if (max_value == 0) { + return 0; + } + // Raw data; now must convert to a value to return! 
+ const std::uint64_t random_int = m_engine(); + + if (max_value > m_engine.max() / 4) { + // If choosing such a large potential number of values, + // the bias will unavoidably be very bad, + // if only generating a single random int. + // Surely no deterministic function + // f : {0,1,...,N} -> {0,1,...,M} + // can be close to giving a uniform distribution, + // if N != M are both large and nearly equal. + // (Should be a theorem in here somewhere!) + if (max_value >= m_engine.max()) { + // Care! Maybe max_value+1 == 0 by wraparound, + // so we cannot do division by max_value+1 ! + return random_int; + } + return random_int % (max_value + 1); + } + + // NOW we know that max_value+1 won't overflow. + + // Mathematical note on the below: let: + // m = maximum possible value of "random_int" + // w = interval_width + // v = max possible value to return. + // + // Thus, random_int could be one of {0,1,2,...,m}, + // and we must return one of {0,1,2,...,v}. + // + // With int arithmetic, we get w = int((m+1)/(v+1)). + // + // e.g., if m=5, v=2, then w = int(6/3) = 2, + // the possible random_int values are {0,1,2,3,4,5}, + // and this is partitioned into 3 sets: + // {0,1}, {2,3}, {4,5}. + // + // [Since, with int arithmetic, + // int(0/2) = int(1/2) = 0, int(2/2) = int(3/2) = 1, ...] + // + // Because these sets have equal size 2, each of the values 0,1,2 + // has equal probability 1/3 of being returned. + // BUT, what if (m+1)/(v+1) is not an integer? + // + // e.g., m=5, v=3. + // Now, we must partition the set {0,1,2,3,4,5} into 4 subsets. + // With the below algorithm, w=int((5+1)/(3+1)) = 1, so the partition is + // {0}, {1}, {2}, {3,4,5}. + // Notice that 0,1,2 have probabilities 1/6 of being returned, + // but v=3 has probability 3/6 of being returned, quite a large bias. + // + // How bad can it be? In general: + // + // (m+1)/(v+1) - 1 < w <= (m+1)/(v+1). + // Thus + // m-v+1 <= w(v+1) <= m+1. + // + // Now, the random_int sets causing the values 0,1,...,v to be returned are + // + // { 0, 1, ..., w-1} --> returns 0 + // { w, w+1, ..., 2w-1} --> returns 1 + // {2w, 2w+1, ..., 3w-1} --> returns 2 + // .... + // {vw, vw+1, ..., (v+1)w - 1, ... , m } --> returns v + // + // Notice that all sets except the last have size w. + // The last set has size m-vw+1. So, the final value v has + // more ways than the other values 0,1,... to be returned, by a factor of + // + // U = (m-vw+1)/w = (m+1)/w - v. + // + // U is the "bias factor" which we want to be as close to 1 as possible. + // Always, U >= (m+1)/[(m+1)/(v+1)] - v = v+1-v = 1, + // as we already know. Also, + // + // U <= (m+1)(v+1)/(m-v+1) - v. + // + // Let's assume that v << m. + // Then we can expand with a geometric series: + // (m+1)(v+1)/(m-v+1) = (v+1).[1-v/(m+1)]^{-1} + // = (v+1).[1 + v/(m+1) + A] + // = v+1 + v(v+1)/(m+1) + (v+1)A, + // + // where A ~ (v/m)^2, with ~ here meaning + // "roughly equal size, up to constant factors". + // Thus, U <= 1 + v(v+1)/(m+1) + (v+1)A. + // + // So, finally, assume also that v(v+1) << m+1. + // [This is the same as saying v^2 << m, since m = 2^64-1 is very large, + // and thus m+1~m, sqrt(m+1)~sqrt(m)]. + // + // ...then: A ~ (v^2/m) / m << 1/m, (v+1)A << v/m << 1, + // and so U = 1 + C where C << 1. + // + // Thus, the bias towards the max value v is negligible, as required. + + // Divide range into approximately equal widths. + // Notice, we can't do m_engine.max()+1 because it overflows to 0. + // But the chance of getting m_engine.max() is negligibly small anyway. 
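+  // (Concrete example: with max_value = 3, interval_width = (2^64 - 1) / 4
+  // = 2^62 - 1, so random_int / interval_width is 0, 1, 2 or 3 except for
+  // the four largest values of random_int, which give 4 and are folded
+  // back to 0 by the final "% 4"; a negligible bias.)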
+ const std::uint64_t interval_width = + m_engine.max() / + // Doesn't overflow, because of the above checks. + (static_cast(max_value) + 1); + + // interval_width cannot be zero, because we ensured above that + // max_value + 1 <= m_engine.max(). + const size_t result = random_int / interval_width; + + // Modulo arithmetic shouldn't be necessary, but be paranoid, + // in case there are mistakes in the above analysis (very likely!) + return result % (max_value + 1); +} + +size_t RNG::get_size_t(size_t min_value, size_t max_value) { + if (min_value > max_value) { + std::swap(min_value, max_value); + } + return min_value + get_size_t(max_value - min_value); +} + +vector RNG::get_permutation(size_t size) { + vector numbers(size); + for (size_t i = 0; i < size; ++i) { + numbers[i] = i; + } + do_shuffle(numbers); + return numbers; +} + +void RNG::set_seed(size_t seed) { m_engine.seed(seed); } + +bool RNG::check_percentage(size_t percentage) { + // e.g. the numbers {0,1,2,3,4} are 5% + // of the numbers {0,1,...,99}. + return get_size_t(99) < percentage; +} + +} // namespace tket diff --git a/tket/src/TokenSwapping/RNG.hpp b/tket/src/TokenSwapping/RNG.hpp new file mode 100644 index 0000000000..e2fde99a2f --- /dev/null +++ b/tket/src/TokenSwapping/RNG.hpp @@ -0,0 +1,175 @@ +#ifndef _TKET_TokenSwapping_RNG_H_ +#define _TKET_TokenSwapping_RNG_H_ + +#include +#include +#include +#include + +namespace tket { + +// Something like this is needed for proper random test data generation +// if you want to be platform-independent, as the C++ standard is stupid. +// (A major scandal, in my opinion). +// The random engines are mostly guaranteed by the standard, +// but the DISTRIBUTIONS, e.g. uniform_distribution, are NOT +// (i.e., the actual algorithm used to convert a string of bits to a number +// in the range {0,1,2,...,N} is not specified at all by the C++ standard). +// Thus, we are NOT guaranteed to get the same results, even with the same +// (1) engine; (2) initial seed; (3) distribution, +// if we use different platforms (or even different compilers +// on the SAME platform), or even different compiler VERSIONS!!! +// +// The C++ standard as far as I know does not specify ANY distribution +// implementations, not even optionally, so you HAVE to do this yourself, +// even for something as simple as a uniform distribution. +// The same applies to, e.g., std::random_shuffle. + +/** + * TODO: move this, once decided where (I would prefer Utils). + * A random number generator class. + * Of course, this is only for random test data generation, + * definitely NOT suitable for any kind of cryptography! + * Note that there are no functions involving doubles anywhere! + * Actually, double calculations can give very slightly different answers + * across platforms, compilers, compiler optimisation settings; + * the numerical difference is absolutely negligible, + * but it's worth being ultra cautious! (And it's much easier for testing + * to get IDENTICAL results across platforms). + */ +class RNG { + public: + /** + * Return a random integer from 0 to N, inclusive. + * Approximately uniform, if max_value is much less than + * the max possible value that can be returned. + * N << sqrt(max uint64) ~ 2^32 ~ 4e9 will work well. + * See the comments in the cpp file implementation for more detail. + * + * @param max_value The value N which is the (inclusive) maximum value + * which can be returned. + * @return A size_t from the inclusive range {0,1,2,...,N}. 
+ */ + size_t get_size_t(size_t max_value); + + /** + * Returns a number in the inclusive interval, including the endpoints. + * @param min_value The smallest value (inclusive) that can be returned. + * @param max_value The largest value (inclusive) that can be returned. + * @return A size_t from the inclusive range {a, a+1, a+2, ... , b}. + */ + size_t get_size_t(size_t min_value, size_t max_value); + + /** + * I believe that the behaviour on the Mersenne twister random engine + * is guaranteed by the C++ standard, although I'm not 100% sure + * (but it seems to work in tests). + * The standard specifies 5489u as the default initial seed, so it would + * be rather pointless to do that if the bits generated + * were still implementation-dependent. + * @param seed A seed value, to alter the RNG state. + * By default, uses the value specified by the standard. + */ + void set_seed(size_t seed = 5489); + + /** Return true p% of the time. + * (Very quick and dirty, doesn't check for, e.g., 110% effort...) + * As mentioned above, we deliberately DON'T have a function returning + * a uniform double. Sticking to integer values is safest. + * @param percentage The probability of returning true, expressed as + * a percentage. + * @return A random bool, returns true with specified probability. + */ + bool check_percentage(size_t percentage); + + /** + * Simply shuffle the elements around at random. + * Approximately uniform "in practice" over all possible permutations. + * (Although of course, strictly speaking very far from uniform for larger + * vectors. The number of possible permutations grows very rapidly + * and quickly becomes larger than the total number of distinct states + * any fixed engine can take, no matter which is used. Thus, for larger + * vectors, only a small proportion of permutations are actually possible). + * This is necessary because C++ random_shuffle is + * implementation-dependent (see above comments). + * @param elements The vector to be shuffled randomly. + */ + template + void do_shuffle(std::vector& elements) { + if (elements.size() < 2) { + return; + } + m_shuffling_data.resize(elements.size()); + for (size_t i = 0; i < m_shuffling_data.size(); ++i) { + m_shuffling_data[i].first = m_engine(); + // Tricky subtle point: without this extra entry to break ties, + // std::sort could give DIFFERENT results across platforms and compilers, + // if the object T allows unequal elements comparing equal. + m_shuffling_data[i].second = i; + } + std::sort( + m_shuffling_data.begin(), m_shuffling_data.end(), + [](const std::pair& lhs, + const std::pair& rhs) { + return lhs.first < rhs.first || + (lhs.first == rhs.first && lhs.second < rhs.second); + }); + // Don't need to make a copy of "elements"! Just do repeated swaps... + for (size_t i = 0; i < m_shuffling_data.size(); ++i) { + const size_t& j = m_shuffling_data[i].second; + if (i != j) { + std::swap(elements[i], elements[j]); + } + } + } + + /** Return a random element from the vector. + * @param elements The vector to be sampled from. + * @return A reference to a random element, approximately uniform. + */ + template + const T& get_element(const std::vector& elements) { + if (elements.empty()) { + throw std::runtime_error("RNG: get_element called on empty vector"); + } + return elements[get_size_t(elements.size() - 1)]; + } + + /** + * Pick out a random element from the vector, copy and return it, + * but also remove that element from the vector (swapping with + * the back for efficiency, i.e. the ordering changes). 
+ * @param elements The vector to be sampled from. + * Decreases size by one each time. + * Time O(1) because the ordering is allowed to change. + * @return A copy of the removed element. + */ + template + T get_and_remove_element(std::vector& elements) { + if (elements.empty()) { + throw std::runtime_error( + "RNG: get_and_remove_element called on empty vector"); + } + size_t index = get_size_t(elements.size() - 1); + const T copy = elements[index]; + elements[index] = elements.back(); + elements.pop_back(); + return copy; + } + + /** Returns the numbers {0,1,2,...,N-1} in some random order. + * @param size The size of the returned vector. + * @return An interval of nonnegative numbers, starting at zero, + * but rearranged randomly. + */ + std::vector get_permutation(size_t size); + + private: + std::mt19937_64 m_engine; + + // Avoids repeated memory reallocation. + std::vector> m_shuffling_data; +}; + +} // namespace tket +#endif \ No newline at end of file diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp new file mode 100644 index 0000000000..8ada732b11 --- /dev/null +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -0,0 +1,179 @@ +#include "RiverFlowPathFinder.hpp" + +#include +#include + +#include "TSAUtils/SwapFunctions.hpp" +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +struct RiverFlowPathFinder::Impl { + DistancesInterface& distances_calculator; + NeighboursInterface& neighbours_calculator; + RNG& rng; + + typedef std::uint64_t EdgeCount; + + /** The key is an undirected edge; the value is the number of times + * that edge was already used in any requested path. + * (So, we favour flows in both directions). + * Overflow is basically impossible, but even if it did occur, + * it would not invalidate the results (it just means that some + * paths might change more than expected). + */ + std::map edge_counts; + + struct ArrowData { + size_t end_vertex; + EdgeCount count; + }; + + /** A work vector. When we are trying to expand a path by one step, + * we need to list all those steps which are valid, i.e. reduce + * the distance to the target by one. + */ + vector candidate_moves; + + /// A work vector, will be built up + vector path; + + Impl( + DistancesInterface& distances_interface, + NeighboursInterface& neighbours_interface, RNG& random_generator) + : distances_calculator(distances_interface), + neighbours_calculator(neighbours_interface), + rng(random_generator) {} + + void reset(); + + /// Increases nonempty "path" towards the target vertex. + void grow_path(size_t target_vertex, size_t required_path_size); + + /// Once "path" has been filled, update the counts (so that future paths + /// through similar vertices are more likely to overlap). + void update_data_with_path(); +}; + +void RiverFlowPathFinder::Impl::reset() { + for (auto& entry : edge_counts) { + entry.second = 0; + } + rng.set_seed(); +} + +void RiverFlowPathFinder::Impl::grow_path( + size_t target_vertex, size_t required_path_size) { + TKET_ASSERT(path.size() < required_path_size); + TKET_ASSERT(!path.empty()); + + // We don't yet know how to move on, so we must choose a neighbour. + // All candidates will have the same edge count. 
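+  // That is: we keep only the neighbours that are one step closer to the
+  // target, and among those only the ones whose edge to path.back() has
+  // the highest usage count so far; a random one of these is chosen below.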
+ candidate_moves.clear(); + + const auto remaining_distance = required_path_size - path.size(); + const auto& neighbours = neighbours_calculator(path.back()); + distances_calculator.register_neighbours(path.back(), neighbours); + + for (size_t neighbour : neighbours) { + const auto neighbour_distance_to_target = + distances_calculator(neighbour, target_vertex); + + if (neighbour_distance_to_target == remaining_distance - 1) { + // Notice that nonexistent entries will be automatically set + // to have count 0, by the C++ standard. + const auto edge_count = edge_counts[get_swap(path.back(), neighbour)]; + if (!candidate_moves.empty()) { + // We'll only add candidates with the same count or higher. + if (candidate_moves[0].count > edge_count) { + continue; + } + if (candidate_moves[0].count < edge_count) { + candidate_moves.clear(); + } + } + candidate_moves.emplace_back(); + candidate_moves.back().end_vertex = neighbour; + candidate_moves.back().count = edge_count; + continue; + } + if (neighbour_distance_to_target != remaining_distance && + neighbour_distance_to_target != remaining_distance + 1) { + std::stringstream ss; + ss << "d(v_" << path.back() << ", v_" << target_vertex + << ")=" << remaining_distance << ". But v_" << path.back() + << " has neighbour v_" << neighbour << ", at distance " + << neighbour_distance_to_target << " to the target v_" + << target_vertex; + throw std::runtime_error(ss.str()); + } + } + if (candidate_moves.empty()) { + std::stringstream ss; + ss << "No neighbours of v_" << path.back() << " at correct distance " + << remaining_distance - 1 << " to target vertex v_" << target_vertex; + throw std::runtime_error(ss.str()); + } + const auto& choice = rng.get_element(candidate_moves); + path.push_back(choice.end_vertex); +} + +void RiverFlowPathFinder::Impl::update_data_with_path() { + for (size_t ii = 1; ii < path.size(); ++ii) { + // Nonexistent counts automatically set to 0 initially + ++edge_counts[get_swap(path[ii - 1], path[ii])]; + } + distances_calculator.register_shortest_path(path); +} + +RiverFlowPathFinder::RiverFlowPathFinder( + DistancesInterface& distances_interface, + NeighboursInterface& neighbours_interface, RNG& rng) + : m_pimpl(std::make_unique( + distances_interface, neighbours_interface, rng)) { + m_name = "RiverFlow"; +} + +RiverFlowPathFinder::~RiverFlowPathFinder() {} + +void RiverFlowPathFinder::reset() { m_pimpl->reset(); } + +const vector& RiverFlowPathFinder::operator()( + size_t vertex1, size_t vertex2) { + m_pimpl->path.clear(); + m_pimpl->path.push_back(vertex1); + if (vertex1 == vertex2) { + return m_pimpl->path; + } + + // We must build up the path. + // The number of vertices including the source and target. + const size_t final_path_size = + 1 + m_pimpl->distances_calculator(vertex1, vertex2); + + for (size_t infinite_loop_guard = 10 * final_path_size; + infinite_loop_guard != 0; --infinite_loop_guard) { + m_pimpl->grow_path(vertex2, final_path_size); + if (m_pimpl->path.size() == final_path_size) { + TKET_ASSERT(m_pimpl->path.back() == vertex2); + m_pimpl->update_data_with_path(); + return m_pimpl->path; + } + } + throw std::runtime_error("get path - dropped out of loop"); +} + +void RiverFlowPathFinder::register_edge(size_t vertex1, size_t vertex2) { + // Automatically zero if the edge doesn't exist. 
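+ // (Typical usage sketch, an assumption rather than something stated in
+ // the original source: once a swap along edge {vertex1, vertex2} has been
+ // accepted into the solution, the caller reports it here, so that later
+ // calls to operator() are biased towards reusing this edge.)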
+ auto& edge_count = m_pimpl->edge_counts[get_swap(vertex1, vertex2)]; + ++edge_count; +} + +bool RiverFlowPathFinder::edge_registration_has_effect() const { return true; } + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.hpp b/tket/src/TokenSwapping/RiverFlowPathFinder.hpp new file mode 100644 index 0000000000..901ae73310 --- /dev/null +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.hpp @@ -0,0 +1,86 @@ +#ifndef _TKET_TokenSwapping_RiverFlowPathFinder_H_ +#define _TKET_TokenSwapping_RiverFlowPathFinder_H_ + +#include +#include +#include + +#include "DistancesInterface.hpp" +#include "NeighboursInterface.hpp" +#include "PathFinderInterface.hpp" +#include "RNG.hpp" + +namespace tket { +namespace tsa_internal { + +/** Think of flowing water: if it has already flowed through, it creates + * channels along which it is more likely to flow next time. + * We do a similar idea: the PURPOSE is to try to make paths overlap; + * if we move tokens along paths with many edges in common, it is more likely + * that some basic swap optimisation will reduce the number of swaps. + * (Disjoint swaps are the worst kind to optimise, of course; + * no reduction is possible). + * + * This is supposed to be reasonably fast. Repeated calls to operator()(v1,v2) + * are likely to return the same path, but may change slightly over time. + */ +class RiverFlowPathFinder : public PathFinderInterface { + public: + /** All the objects should remain valid throughout + * the lifetime of this object. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex. + * @param rng A source of (pseudo) randomness. + */ + RiverFlowPathFinder( + DistancesInterface& distances, NeighboursInterface& neighbours, RNG& rng); + + /** For reuse in different problems (but still the same architecture; + * the same "distances" and "neighbours" objects are used), + * but constructing paths anew + * (which is appropriate because completely different problems will + * probably need different paths). This also resets the RNG with its + * default seed, for better reproducibility. + * + * (This may be suitable for simulated annealing-type algorithms + * which involve solving with many different token positions, i.e. + * partially finished problems, even though the end-to-end problem + * is the same). + */ + virtual void reset() override; + + /** Get the path from v1 to v2. As always, may change over time; + * path(v1, v2) is NOT necessarily the reverse of path(v2, v1). + * @param vertex1 First vertex v1. + * @param vertex2 Second vertex v2. + * @return A list of vertices, starting with v1 and ending with v2, + * giving a shortest path from v1 to v2. + */ + virtual const std::vector& operator()( + size_t vertex1, size_t vertex2) override; + + virtual ~RiverFlowPathFinder(); + + /** We really do want to know which edges have been used in the solution so + * far, that's the whole point of this class. + * @param vertex1 First vertex v1 of an edge v1-v2 that was used in the + * solution. + * @param vertex2 Second vertex v2 of the edge. + */ + virtual void register_edge(size_t vertex1, size_t vertex2) override; + + /** Returns true for this object, since we definitely do want to remember + * previous edges. + * @return True, always, for this class. 
+ */ + virtual bool edge_registration_has_effect() const override; + + private: + struct Impl; + std::unique_ptr m_pimpl; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp new file mode 100644 index 0000000000..3b8c24ab0f --- /dev/null +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -0,0 +1,283 @@ +#include "SwapListOptimiser.hpp" + +#include "TSAUtils/VertexSwapResult.hpp" +#include "Utils/Assert.hpp" + +namespace tket { +namespace tsa_internal { + +void SwapListOptimiser::push_back(SwapList& list, const Swap& swap) { + if (list.empty() || list.back() != swap) { + list.push_back(swap); + return; + } + list.pop_back(); +} + +// It may be that using a std::set is very slightly quicker +// (to store only the vertices containing tokens, as we don't care about the +// targets). However, it's simpler just to use the copied VertexMapping; not +// worth worrying about. (Also, if a std::map is large, then copying all keys +// into a std::set might actually be SLOWER than copying the whole map; +// HOPEFULLY the compiler can copy a whole map very quickly just by copying raw +// bytes, but for a std::set it would have to insert the keys one-by-one and do +// a lot of tree rebalancing). +void SwapListOptimiser::optimise_pass_remove_empty_swaps( + SwapList& list, VertexMapping vertex_mapping) { + auto id_opt = list.front_id(); + while (id_opt) { + const auto id = id_opt.value(); + id_opt = list.next(id); + const VertexSwapResult result(list.at(id), vertex_mapping); + if (result.tokens_moved == 0) { + list.erase(id); + } + } +} + +std::optional +SwapListOptimiser::get_id_of_previous_blocker(SwapList& list, SwapID id) { + const auto& initial_swap = list.at(id); + + // This is the first non-disjoint swap it hits when it moves back. + // Guaranteed to be valid if we drop out of the loop. + SwapID current_id = id; + + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto prev_id = list.previous(current_id); + if (!prev_id) { + // Right at the front! + return {}; + } + current_id = prev_id.value(); + const auto& new_swap = list.at(current_id); + if (!disjoint(initial_swap, new_swap)) { + // Blocks, OR identical + if (new_swap != initial_swap) { + // It blocks + return current_id; + } + terminated_correctly = true; + break; + } + } + TKET_ASSERT(terminated_correctly); + // It's hit a copy of itself + list.erase(id); + list.erase(current_id); + return {}; +} + +bool SwapListOptimiser::move_swap_towards_front(SwapList& list, SwapID id) { + TKET_ASSERT(list.front_id()); + if (id == list.front_id().value()) { + return false; + } + const auto old_size = list.size(); + const auto previous_blocker_opt = get_id_of_previous_blocker(list, id); + if (old_size != list.size()) { + // The swap was erased! + return true; + } + if (previous_blocker_opt) { + // It can't move all the way to the front. + const ID blocker = previous_blocker_opt.value(); + + // Must be non-null. + const ID previous_id = list.previous(id).value(); + if (blocker != previous_id) { + // Do the move...erase before insert to minimise possible sizes... + const auto swap = list.at(id); + list.erase(id); + const auto new_id = list.insert_after(blocker); + list.at(new_id) = swap; + } + return false; + } + // There was no blocker, so we CAN move all the way to the front + // (and we checked before that we're not already at the front). 
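+ // (Illustrative example, not in the original source: a swap (2,3) can
+ // move past (0,1) or (4,5), which are disjoint from it and hence commute
+ // with it, but it is blocked by (1,2), which shares vertex 2.)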
+ const auto swap = list.at(id); + list.erase(id); + list.push_front(swap); + return false; +} + +void SwapListOptimiser::optimise_pass_with_zero_travel(SwapList& list) { + if (list.size() <= 1) { + return; + } + ID current_id = list.front_id().value(); + + // This moves swaps to cancel with previous identical swaps, + // if there is nothing blocking the move. + // However, only worth doing if previous identical swaps do exist; + // leaves them unchanged otherwise. + // We can be sneaky: rather than storing all previous IDs + // for each swap, we store the NUMBER of them; we don't need + // to know the previous location, since the move back + // will check for that anyway. + // + // This probably could be cleverly optimised further + // but would require more thought. + for (auto& entry : m_data) { + // This is quicker than clearing and reinserting; + // no tree rebalancing. + entry.second = 0; + } + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto next_id_opt = list.next(current_id); + + // C++ guarantees nonexistent values will be set to 0. + auto& swap_count = m_data[list.at(current_id)]; + if (swap_count == 0) { + swap_count = 1; + } else { + // There's a possibility of cancellation. + const auto old_size = list.size(); + (void)get_id_of_previous_blocker(list, current_id); + if (old_size == list.size()) { + // No cancellation. + ++swap_count; + } else { + // Cancellation occurred; "get_id_of_previous_blocker" already erased + // both vertex swaps, but didn't update the counts. + --swap_count; + } + } + if (!next_id_opt) { + return; + } + current_id = next_id_opt.value(); + } + TKET_ASSERT(!"optimise_pass_with_zero_travel termination"); +} + +void SwapListOptimiser::optimise_pass_with_frontward_travel(SwapList& list) { + if (list.size() <= 1) { + return; + } + // Start one past the front. + ID current_id = list.front_id().value(); + current_id = list.next(current_id).value(); + + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto next_id_opt = list.next(current_id); + move_swap_towards_front(list, current_id); + if (!next_id_opt) { + return; + } + current_id = next_id_opt.value(); + } + TKET_ASSERT(!"optimise_pass_with_frontward_travel termination"); +} + +void SwapListOptimiser::optimise_pass_with_token_tracking(SwapList& list) { + if (list.size() <= 1) { + return; + } + m_token_tracker.clear(); + optimise_pass_with_token_tracking_without_clearing_tracker(list); +} + +void SwapListOptimiser:: + optimise_pass_with_token_tracking_without_clearing_tracker(SwapList& list) { + if (list.size() <= 1) { + return; + } + // Put a different token at each vertex, and start swapping. + // Now, if a TOKEN swap (rather than vertex swap) + // repeats, then removing this vertex swap together with the preceding one + // in which those two tokens were exchanged gives the same final result. + // This is because, if we don't actually carry out the first swap, + // everything proceeds as before, and all tokens except those two + // are in the same place. When we reach the time of the second swap, + // everything is as before EXCEPT that the two tokens have changed places; + // thus the effect of the second swap + // was merely to interchange those two tokens again. + // + // Now, m_data will store the previous LOCATIONS of vertex swaps. + // + // The actual values of the tokens are irrelevant, + // as long as they are distinct. 
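+ // (Worked example, not in the original source: for vertex swaps
+ // (0,1),(1,2),(0,1),(1,2) with tokens a,b,c initially on vertices 0,1,2,
+ // the token swaps are (a,b),(a,c),(b,c),(a,b); since (a,b) repeats,
+ // the first and fourth vertex swaps can both be erased.)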
+ + const auto invalid_index = VectorListHybridSkeleton::get_invalid_index(); + + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + // Keep looping until we stop changing. + // The size is always decreasing or unchanged; + // we never insert, only erase. + const auto old_size = list.size(); + if (old_size == 0) { + return; + } + for (auto& entry : m_data) { + entry.second = invalid_index; + } + ID current_id = list.front_id().value(); + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto& vertex_swap = list.at(current_id); + const auto token_swap = m_token_tracker.do_vertex_swap(vertex_swap); + const auto citer = m_data.find(token_swap); + if (citer != m_data.cend() && citer->second != invalid_index) { + // The swap occurred before, the entry tells us the ID. + // Erase both swaps. + list.erase(citer->second); + list.erase(current_id); + // We have to start at the beginning. + // Changing the labels for these tokens + // messes up other entries between the two swaps. + // A warm restart from the middle of the swap list + // would be a lot of extra complication, not worth it for now. + terminated_correctly = true; + break; + } + // Swap hasn't occurred before, now advance. + m_data[token_swap] = current_id; + const auto next_id_opt = list.next(current_id); + if (!next_id_opt) { + terminated_correctly = true; + break; + } + current_id = next_id_opt.value(); + } + TKET_ASSERT(terminated_correctly); + const auto new_size = list.size(); + if (old_size == new_size) { + return; + } + TKET_ASSERT(new_size < old_size); + } + TKET_ASSERT(!"optimise_pass_with_token_tracking termination"); +} + +void SwapListOptimiser::full_optimise(SwapList& list) { + // More experimentation needed to find the best combination. + optimise_pass_with_zero_travel(list); + m_token_tracker.reset(); + optimise_pass_with_token_tracking_without_clearing_tracker(list); +} + +void SwapListOptimiser::full_optimise( + SwapList& list, const VertexMapping& vertex_mapping) { + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = list.size(); + full_optimise(list); + optimise_pass_remove_empty_swaps(list, vertex_mapping); + if (old_size == list.size() || list.size() == 0) { + return; + } + TKET_ASSERT(list.size() < old_size); + } + TKET_ASSERT(!"full_optimise termination"); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/SwapListOptimiser.hpp b/tket/src/TokenSwapping/SwapListOptimiser.hpp new file mode 100644 index 0000000000..d01ad080d5 --- /dev/null +++ b/tket/src/TokenSwapping/SwapListOptimiser.hpp @@ -0,0 +1,149 @@ +#ifndef _TKET_TokenSwapping_SwapListOptimiser_H_ +#define _TKET_TokenSwapping_SwapListOptimiser_H_ + +#include "DynamicTokenTracker.hpp" + +namespace tket { +namespace tsa_internal { + +/** Can be reused, faster than constructing a new object. + * (This is intended: the final full algorithm may involve much + * random chopping and changing, simulated annealing, etc. and so + * call this many times). + * This is about directly optimising a list of swaps, + * knowing nothing about target vertices and tokens. + * Each optimisation pass may reorder the swaps and erase some, + * but always such that the resultant start-to-end vertex permutation + * is unchanged. 
+ * (Thus, unmentioned swaps can never be added, because this class + * has no way of knowing if such swaps are possible). + * Thus this can convert bad solutions into better ones, but if our problem + * has some empty tokens, i.e. tokens with no target, which can end up + * anywhere, then algorithms will need that data to get good solutions. + */ +class SwapListOptimiser { + public: + using ID = SwapList::ID; + + /** The most trivial O(1) optimisation: the new swap is added to the back, + * unless it equals the current last swap, in which case they cancel + * each other and are simply removed. However, all other + * optimisation passes also do this as a byproduct. + * @param list The swaps. + * @param swap The single swap to be pushed back to the list (but + * adjacent equal swaps will be erased). + */ + static void push_back(SwapList& list, const Swap& swap); + + /** The slowest but most accurate end-to-end optimisation pass. + * It will include other optimisation passes, so don't bother + * calling them also. This is possibly O(N^3.log N) + * in the worst case (but a clever proof might reduce this), + * but hopefully much faster in practice. + * @param list The swaps to be optimised. + */ + void full_optimise(SwapList& list); + + /** Call when you also know the tokens. The slowest but hopefully best + * optimisation pass, which also removes empty swaps (swaps in which + * neither vertex has a token, so that they have no effect). + * @param list The swaps to be optimised. + * @param vertex_mapping The desired source->token mapping. + */ + void full_optimise(SwapList& list, const VertexMapping& vertex_mapping); + + // Most optimisation passes below are O(N^2.log N) in the worst case, + // but in practice will hopefully be a lot faster. + // It's hard to compare passes; + // for any two passes A, B there are probably examples where + // pass A is better than B, but others where B is better than A. + // Also, passes are not commutative; reordering the passes + // can give different results! Experimentation needed. + + /** Do not move any swaps, unless it cancels with a previous copy of itself + * (in which case, delete both). The fastest pass. + * @param list The swaps to be optimised. + */ + void optimise_pass_with_zero_travel(SwapList& list); + + /** Starting from the front and working along, every swap is moved + * as far towards the front as possible, until it hits a non-disjoint + * swap (so that it cannot pass through it; it doesn't commute), + * or an identical copy of itself (so that they cancel each other). + * The overall reduction should be the same as + * optimise_pass_with_zero_travel, which is cheaper + * (because swaps do not move), but interacting swaps should + * cluster together, which may be useful for certain algorithms. + * @param list The swaps to be optimised. + */ + void optimise_pass_with_frontward_travel(SwapList& list); + + /** Erase two swaps if they do the same TOKEN swap (which means that + * they can be removed). Knows nothing about the problem-specific tokens, + * instead this creates artificial tokens. + * This is slower than optimise_pass_with_zero_travel and + * optimise_pass_with_frontward_travel, but is strictly more powerful + * (any reduction by those passes will also occur with this pass, + * but some additional reductions are possible with this pass. 
E.g., + * this pass reduces (01)(12)(01)(12)(01)(12), the cube of a 3-cycle, + * to zero swaps, which the other passes cannot, since (01) and (12) + * are not disjoint and hence cannot pass through each other). + * However, NOTE that this pass can introduce EMPTY swaps, w.r.t. + * the problem-specific tokens, so further passes to remove + * problem-specific empty token swaps are necessary + * to get the full reduction. + * @param list The swaps to be optimised. + */ + void optimise_pass_with_token_tracking(SwapList& list); + + /** O(N log N): simply discard any swap between two empty tokens. + * (Recall that optimise_pass_with_token_tracking does NOT know + * about these specific tokens, it creates internal artificial ones + * just for the pass. That pass can be much slower than this pass, + * but also can make some reductions which this pass cannot). + * @param list The swaps to be optimised. + * @param vertex_mapping The desired source->target mapping (so that + * we can determine which vertices have tokens on them). + */ + void optimise_pass_remove_empty_swaps( + SwapList& list, VertexMapping vertex_mapping); + + private: + std::map m_data; + + DynamicTokenTracker m_token_tracker; + + /** What would happen if you tried to move the swap towards the front? + * Doesn't actually move the swap, just returns the ID of the first + * blocking swap it hits (or null if there is none and it could move + * all the way to the front), UNLESS it actually hits another copy of itself, + * in which case it DOES erase (and the caller can tell by checking the + * size). + * @param list The swaps to be optimised. + * @param id The ID of the current swap which we might move frontwards. + * @return The ID of the previous reachable distinct swap which is + * non-disjoint (has a vertex in common, so doesn't commute), + * or empty if none exists. + */ + static std::optional get_id_of_previous_blocker( + SwapList& list, SwapID id); + + /** Actually move the swap as far towards the front as possible until + * blocked, erasing it if it cancelled with another copy of itself. + * @param list The swaps to be optimised. + * @param id The ID of the current swap to move frontwards. + * @return true if the swap cancelled with a copy of itself. + */ + static bool move_swap_towards_front(SwapList& list, SwapID id); + + /** The same as optimise_pass_with_token_tracking, + * but without calling "clear" OR "reset" on m_token_tracker first). + * @param list The swaps to be optimised. 
+ */ + void optimise_pass_with_token_tracking_without_clearing_tracker( + SwapList& list); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp new file mode 100644 index 0000000000..390911aa84 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp @@ -0,0 +1,28 @@ +#include "DebugFunctions.hpp" + +#include + +namespace tket { +namespace tsa_internal { + +std::string str(const VertexMapping& vertex_mapping) { + std::stringstream ss; + ss << "VM:"; + for (const auto& entry : vertex_mapping) { + ss << " " << entry.first << "->" << entry.second << " "; + } + return ss.str(); +} + +std::string str(const SwapList& swaps) { return str(swaps.to_vector()); } + +std::string str(const std::vector& swaps) { + std::stringstream ss; + for (auto swap : swaps) { + ss << " (" << swap.first << "," << swap.second << ") "; + } + return ss.str(); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp new file mode 100644 index 0000000000..0d8c62bcb7 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp @@ -0,0 +1,34 @@ +#ifndef _TKET_TokenSwapping_TSAUtils_DebugFunctions_H_ +#define _TKET_TokenSwapping_TSAUtils_DebugFunctions_H_ + +#include + +#include "VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** Get a string representation. + * @param vertex_mapping A mapping, usually representing a desired + * source->target mapping for a Token Swapping problem. + * @return A string representation. + */ +std::string str(const VertexMapping& vertex_mapping); + +/** Get a string representation. + * @param swaps An ordered list of swaps, usually the solution to a Token + * Swapping problem. + * @return A string representation. + */ +std::string str(const SwapList& swaps); + +/** Get a string representation. + * @param swaps An ordered list of swaps, usually the solution to a Token + * Swapping problem. + * @return A string representation. 
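+ * (For example, the swaps {0,1} and {2,3} would give a string like
+ * " (0,1)  (2,3) ", matching the format used in DebugFunctions.cpp.)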
+ */ +std::string str(const std::vector& swaps); + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp new file mode 100644 index 0000000000..bd9981248e --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp @@ -0,0 +1,52 @@ +#include "DistanceFunctions.hpp" + +#include +#include + +; + +namespace tket { +namespace tsa_internal { + +size_t get_total_home_distances( + const VertexMapping& vertex_mapping, + DistancesInterface& distances_calculator) { + size_t sum_of_distances = 0; + for (const auto& entry : vertex_mapping) { + sum_of_distances += distances_calculator(entry.first, entry.second); + } + return sum_of_distances; +} + +int get_move_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances) { + const auto citer = vertex_mapping.find(v1); + if (citer == vertex_mapping.cend()) { + return 0; + } + const auto target = citer->second; + const std::intmax_t v1_to_target = distances(v1, target); + const std::intmax_t v2_to_target = distances(v2, target); + return static_cast(v1_to_target - v2_to_target); +} + +int get_swap_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances) { + return get_move_decrease(vertex_mapping, v1, v2, distances) + + get_move_decrease(vertex_mapping, v2, v1, distances); +} + +size_t get_swaps_lower_bound( + const VertexMapping& vertex_mapping, + DistancesInterface& distances_calculator) { + // Each swap decreases the sum by at most 2 (and more likely 1 in many cases, + // if the mapping is sparse), so we need >= sum/2. But it's an integer of + // course. + return (get_total_home_distances(vertex_mapping, distances_calculator) + 1) / + 2; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp new file mode 100644 index 0000000000..4ab6123c4f --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp @@ -0,0 +1,76 @@ +#ifndef _TKET_TokenSwapping_TSAUtils_DistanceFunctions_H_ +#define _TKET_TokenSwapping_TSAUtils_DistanceFunctions_H_ + +#include +#include +#include + +#include "../DistancesInterface.hpp" +#include "VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** The sum of the distances of each nonempty token to its home. + * (This is also referred to as "L" in various places, coming from the 2016 + * paper "Approximation and Hardness of Token Swapping"). + * @param vertex_mapping (current vertex where a token lies)->(target vertex) + * mapping. + * @param distances An object to calculate distances between vertices. + * @return the sum, over all tokens, of (current vertex)->(target vertex) + * distances. + */ +size_t get_total_home_distances( + const VertexMapping& vertex_mapping, DistancesInterface& distances); + +/** For just the abstract move v1->v2, ignoring the token on v2, + * by how much does L (the total distances to home) decrease? + * @param vertex_mapping current source->target mapping. + * @param v1 First vertex. + * @param v2 Second vertex. Not required to be adjacent to v1. + * @param distances An object to calculate distances between vertices. 
+ * @return The amount by which L = get_total_home_distances would decrease, + * IF we moved the token on v1 to v2, IGNORING the token currently on v2 + * (which of course is impossible to do in reality if there is a token on + * v2), and leaving all other tokens unchanged. Doesn't have to be positive, of + * course, although positive numbers are good. + */ +int get_move_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances); + +/** The same as get_move_decrease, but for an abstract swap(v1,v2). + * @param vertex_mapping current source->target mapping. + * @param v1 First vertex. + * @param v2 Second vertex. Not required to be adjacent to v1. + * @param distances An object to calculate distances between vertices. + * @return The amount by which L = get_total_home_distances would decrease, + * (which does not have to be a positive number), + * IF the tokens currently on v1,v2 were swapped, and all other tokens + * left unchanged. + */ +int get_swap_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances); + +/** A simple theoretical lower bound on the number of swaps necessary + * to achieve a given vertex mapping. (Of course it is not always possible + * to achieve this bound. But the algorithm in the 2016 paper + * "Approximation and Hardness of Token Swapping", for example, guarantees + * to find a solution within a factor of 4, or a factor of 2 for trees, + * in the case where every vertex has a token). + * TODO: What happens if some vertices are empty? Not considered in the 2016 + * paper! Need to think about it. This is still a lower bound, but how close? + * @param vertex_mapping current source->target mapping. + * @param distances An object to calculate distances between vertices. + * @return A number S such that every possible solution has >= S swaps. + * However, note that the true minimum value might be larger, but finding + * the value seems about as hard as finding an actual solution, and thus + * is possibly exponentially hard (seems to be unknown, even for trees). 
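+ * (Small worked example, not from the original source: for adjacent
+ * vertices u, v with mapping u->v, v->u, the total home distance is 2,
+ * so the returned bound is (2+1)/2 = 1, which a single swap achieves.)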
+ */ +size_t get_swaps_lower_bound( + const VertexMapping& vertex_mapping, DistancesInterface& distances); + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp new file mode 100644 index 0000000000..6eef0b7712 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -0,0 +1,39 @@ +#include "GeneralFunctions.hpp" + +#include +#include + +; + +namespace tket { +namespace tsa_internal { + +std::set get_random_set( + RNG& rng, size_t sample_size, size_t population_size) { + if (sample_size > population_size) { + throw std::runtime_error("get_random_set: sample too large"); + } + std::set result; + if (sample_size == 0 || population_size == 0) { + return result; + } + if (sample_size < population_size / 2) { + while (result.size() < sample_size) { + result.insert(rng.get_size_t(population_size - 1)); + } + return result; + } + std::vector elems(population_size); + std::iota(elems.begin(), elems.end(), 0); + rng.do_shuffle(elems); + for (const auto& elem : elems) { + result.insert(elem); + if (result.size() == sample_size) { + return result; + } + } + throw std::runtime_error("get_random_set: dropped out of loop"); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp new file mode 100644 index 0000000000..165b085c29 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp @@ -0,0 +1,87 @@ +#ifndef _TKET_TokenSwapping_TSAUtils_GeneralFunctions_H_ +#define _TKET_TokenSwapping_TSAUtils_GeneralFunctions_H_ + +// This is for "leftover" functions not specifically linked to token swapping +// which are candidates for being used and moved elsewhere, +// e.g. the main src/Utils folder. + +#include +#include +#include +#include + +#include "../RNG.hpp" + +namespace tket { +namespace tsa_internal { + +/** Returns the value in a map corresponding to a key, IF it exists, + * or an empty optional object if it does not. + * @param map The std::map object. + * @param key The key. + * @return The value if it exists, or an empty optional value if it doesn't. + */ +template +std::optional get_optional_value(const std::map& map, const K& key) { + const auto citer = map.find(key); + if (citer == map.cend()) { + return {}; + } + return citer->second; +} + +/** The key->value mapping is required to be bijective (reversible). + * @param map The std::map object. + * @return Another std::map, with the key->value mappings reversed. + * Throws if the map is not reversible. + */ +template +std::map get_reversed_map(const std::map& map) { + std::map reversed_map; + for (const auto& entry : map) { + reversed_map[entry.second] = entry.first; + } + if (map.size() != reversed_map.size()) { + throw std::runtime_error("get_reversed_map called with non-reversible map"); + } + return reversed_map; +} + +/** Finds the rightmost "one" (least significant bit) + * occurring in the binary expansion of x, an unsigned integer type. + * Returns the bit, whilst also removing it from x. + * @param x The original unsigned integer type, which will have one bit removed + * (or remain at zero if already at zero). + * @return The bit which was removed from x (or 0 if none was removed). + */ +template +static UINT get_rightmost_bit(UINT& x) { + // Standard bit hack: decrementing 10000... gives 01111... 
+ // E.g., consider: + // x = 001101011010000 + // x-1 = 001101011001111 + // ~(x-1) = 110010100110000 + // Therefore, AND x with ~(x-1). + + // No "if" statements; unsigned int wraparound is allowed. + UINT y = x; + --y; + y = ~y; + const UINT bit = (x & y); + x ^= bit; + return bit; +} + +/** Return a random subset of given size from the population {0,1,2,...,N}. + * @param rng A random number generator. + * @param sample_size The desired size of the returned set. + * @param population_size The number of elements in the population (an interval + * of nonnegative integers, starting at 0). + * @return A set of numbers. Throws upon invalid parameters. + */ +std::set get_random_set( + RNG& rng, size_t sample_size, size_t population_size); + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp new file mode 100644 index 0000000000..e5c38aabaa --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp @@ -0,0 +1,29 @@ +#include "SwapFunctions.hpp" + +#include +#include + +; + +namespace tket { +namespace tsa_internal { + +Swap get_swap(size_t v1, size_t v2) { + if (v1 == v2) { + std::stringstream ss; + ss << "get_swap : for equal vertices v1 = v2 = v_" << v1; + throw std::runtime_error(ss.str()); + } + if (v1 < v2) { + return std::make_pair(v1, v2); + } + return std::make_pair(v2, v1); +} + +bool disjoint(const Swap& s1, const Swap& s2) { + return s1.first != s2.first && s1.first != s2.second && + s1.second != s2.first && s1.second != s2.second; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp new file mode 100644 index 0000000000..3437c38fe5 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp @@ -0,0 +1,36 @@ +#ifndef _TKET_TokenSwapping_TSAUtils_SwapFunctions_H_ +#define _TKET_TokenSwapping_TSAUtils_SwapFunctions_H_ + +#include +#include +#include + +#include "../VectorListHybrid.hpp" + +namespace tket { +namespace tsa_internal { + +typedef std::pair Swap; +typedef VectorListHybrid SwapList; +typedef SwapList::ID SwapID; + +/** No distinction between (v1, v2) and (v2, v1). 
+ * Will ensure that v1 +#include + +#include "../../Utils/Assert.hpp" +#include "VertexSwapResult.hpp" + +; + +namespace tket { +namespace tsa_internal { + +bool all_tokens_home(const VertexMapping& vertex_mapping) { + for (const auto& entry : vertex_mapping) { + if (entry.first != entry.second) { + return false; + } + } + return true; +} + +void check_mapping( + const VertexMapping& vertex_mapping, VertexMapping& work_mapping) { + work_mapping.clear(); + for (const auto& entry : vertex_mapping) { + if (work_mapping.count(entry.second) == 0) { + work_mapping[entry.second] = entry.first; + } else { + std::stringstream ss; + ss << "Vertices v_" << entry.first << " and v_" + << work_mapping[entry.second] << " both have the same target vertex v_" + << entry.second; + throw std::runtime_error(ss.str()); + } + } +} + +void check_mapping(const VertexMapping& vertex_mapping) { + VertexMapping work_mapping; + check_mapping(vertex_mapping, work_mapping); +} + +void append_swaps_to_interchange_path_ends( + const std::vector& path, VertexMapping& vertex_mapping, + SwapList& swap_list) { + if (path.size() < 2 || path.front() == path.back()) { + return; + } + for (size_t ii = path.size() - 1; ii > 0; --ii) { + VertexSwapResult(path[ii], path[ii - 1], vertex_mapping, swap_list); + } + for (size_t ii = 2; ii < path.size(); ++ii) { + VertexSwapResult(path[ii], path[ii - 1], vertex_mapping, swap_list); + } +} + +size_t get_source_vertex( + VertexMapping& source_to_target_map, size_t target_vertex) { + if (source_to_target_map.count(target_vertex) == 0) { + // If it IS a genuine permutation mapping (which we assume), + // then the vertex is as yet unmentioned (and hence unmoved). + source_to_target_map[target_vertex] = target_vertex; + return target_vertex; + } + for (const auto& entry : source_to_target_map) { + if (entry.second == target_vertex) { + return entry.first; + } + } + TKET_ASSERT(!"get_source_vertex"); + return target_vertex; +} + +void add_swap(VertexMapping& source_to_target_map, const Swap& swap) { + const auto source_v1 = get_source_vertex(source_to_target_map, swap.first); + const auto source_v2 = get_source_vertex(source_to_target_map, swap.second); + std::swap(source_to_target_map[source_v1], source_to_target_map[source_v2]); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp new file mode 100644 index 0000000000..00c490163a --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp @@ -0,0 +1,74 @@ +#ifndef _TKET_TokenSwapping_TSAUtils_VertexMappingFunctions_H_ +#define _TKET_TokenSwapping_TSAUtils_VertexMappingFunctions_H_ + +#include +#include +#include + +#include "SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/// The desired result of swapping is to move a token on each "key" +/// vertex to the "value" vertex. +typedef std::map VertexMapping; + +/** Are all tokens on their target vertices? + * @param vertex_mapping The desired mapping. + * @return Whether all tokens are on their target vertices. + */ +bool all_tokens_home(const VertexMapping& vertex_mapping); + +/** Does nothing, except throwing if the mapping is invalid. + * @param vertex_mapping The desired mapping, to be checked. + */ +void check_mapping(const VertexMapping& vertex_mapping); + +/** When you've already got another expendable VertexMapping object, + * it saves time to reuse instead of constructing a new one. 
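+ * (Illustrative example, not from the original source: the mapping
+ * {0->1, 2->1} is invalid and makes check_mapping throw, because
+ * vertices 0 and 2 would both have target vertex 1.)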
+ * @param vertex_mapping The desired mapping, to be checked. + * @param work_mapping A disposable object, will be overwritten. + */ +void check_mapping( + const VertexMapping& vertex_mapping, VertexMapping& work_mapping); + +/** We have a path [v(1), v(2), v(3), ..., v(N)]. + * Calculate individual swaps along this path (i.e., using only + * Swap(v(i), v(i+1)) which we know are valid), which would swap the tokens + * (if any) on v(1), v(N), and perform the swaps. + * Only append nonempty swaps (i.e., where at least one token is moved). + * @param path The path (must be an actual possible path), whose start + * and end vertices are to be swapped (with all other vertices) + * @param vertex_mapping The source to target mapping, which will be updated. + * @param swap_list The list of swaps, which will be updated. + */ +void append_swaps_to_interchange_path_ends( + const std::vector& path, VertexMapping& vertex_mapping, + SwapList& swap_list); + +/** Given a source->target vertex mapping and a TARGET vertex, find the + * corresponding source vertex. If the given target vertex does not appear in + * the map, create it as a new fixed vertex, i.e. map[v] = v for the given + * target vertex v. + * @param source_to_target_map A source->target vertex mapping. + * @param target_vertex A target vertex, to find in the map. + * @return The source vertex corresponding to the target (possibly newly created + * if the target was not present). + */ +size_t get_source_vertex( + VertexMapping& source_to_target_map, size_t target_vertex); + +/** We currently have a source->target mapping. Perform the vertex swap, + * but if any vertex in the swap is not present in the map, add it to the map as + * a new source vertex. + * Note that, since we DON'T have a target->source map, we have to do an O(N) + * search to find all target vertices. + * @param source_to_target_map The map to update with the swap. + * @param swap The swap to perform. + */ +void add_swap(VertexMapping& source_to_target_map, const Swap& swap); + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp new file mode 100644 index 0000000000..ec3d345135 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp @@ -0,0 +1,46 @@ +#include "VertexSwapResult.hpp" + +; + +namespace tket { +namespace tsa_internal { + +VertexSwapResult::VertexSwapResult( + size_t v1, size_t v2, VertexMapping& vertex_mapping, SwapList& swap_list) + : VertexSwapResult(v1, v2, vertex_mapping) { + if (tokens_moved != 0) { + swap_list.push_back(get_swap(v1, v2)); + } +} + +VertexSwapResult::VertexSwapResult( + const Swap& swap, VertexMapping& vertex_mapping) + : VertexSwapResult(swap.first, swap.second, vertex_mapping) {} + +VertexSwapResult::VertexSwapResult( + size_t v1, size_t v2, VertexMapping& vertex_mapping) { + if (vertex_mapping.count(v1) == 0) { + if (vertex_mapping.count(v2) == 0) { + tokens_moved = 0; + return; + } + // No token on the first. + vertex_mapping[v1] = vertex_mapping[v2]; + vertex_mapping.erase(v2); + tokens_moved = 1; + return; + } + // A token on the first. + if (vertex_mapping.count(v2) == 0) { + vertex_mapping[v2] = vertex_mapping[v1]; + vertex_mapping.erase(v1); + tokens_moved = 1; + return; + } + // Tokens on both. 
+ std::swap(vertex_mapping[v1], vertex_mapping[v2]); + tokens_moved = 2; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp new file mode 100644 index 0000000000..9a222cfa35 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp @@ -0,0 +1,47 @@ +#ifndef _TKET_TokenSwapping_TSAUtils_VertexSwapResult_H_ +#define _TKET_TokenSwapping_TSAUtils_VertexSwapResult_H_ + +#include +#include +#include + +#include "VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** For performing a vertex swap, and checking how many tokens moved. */ +struct VertexSwapResult { + /** How many tokens moved? Must be one of 0,1,2. */ + unsigned tokens_moved; + + /** Carry out the swap on the tokens and get the result. + * @param swap The swap to perform. + * @param vertex_mapping The source to target mapping, + * will be updated with the swap. + */ + VertexSwapResult(const Swap& swap, VertexMapping& vertex_mapping); + + /** Pass in the two vertex size_t numbers directly. + * @param v1 First vertex of the swap to perform. + * @param v2 Second vertex of the swap to perform. + * @param vertex_mapping The source to target mapping, + * will be updated with the swap. + */ + VertexSwapResult(size_t v1, size_t v2, VertexMapping& vertex_mapping); + + /** If the swap moves at least one nonempty token, carry out the swap. + * Otherwise, does nothing. + * @param v1 First vertex of the swap to perform. + * @param v2 Second vertex of the swap to perform. + * @param vertex_mapping The source to target mapping, + * will be updated with the swap. + * @param swap_list The list of swaps, which will be updated with the swap. + */ + VertexSwapResult( + size_t v1, size_t v2, VertexMapping& vertex_mapping, SwapList& swap_list); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp new file mode 100644 index 0000000000..292528cc90 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -0,0 +1,116 @@ +#include "CanonicalRelabelling.hpp" + +#include +#include + +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +CanonicalRelabelling::CanonicalRelabelling() { + // no more than 6 vertices, so no more than 6 cycles ever needed. + m_cycles.resize(6); +} + +const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( + const VertexMapping& desired_mapping) { + m_result.too_many_vertices = false; + m_result.permutation_hash = 0; + m_result.new_to_old_vertices.clear(); + m_result.old_to_new_vertices.clear(); + m_result.identity = all_tokens_home(desired_mapping); + if (m_result.identity) { + return m_result; + } + check_mapping(desired_mapping, m_work_mapping); + if (desired_mapping.size() > 6) { + m_result.too_many_vertices = true; + return m_result; + } + // If not the identity, at least 2 vertices moved. 
+ TKET_ASSERT(desired_mapping.size() >= 2); + TKET_ASSERT(desired_mapping.size() <= 6); + + m_desired_mapping = desired_mapping; + unsigned next_cyc_index = 0; + + while (!m_desired_mapping.empty()) { + // New cycle starts + auto& this_cycle = m_cycles[next_cyc_index]; + ++next_cyc_index; + this_cycle.clear(); + this_cycle.push_back(m_desired_mapping.cbegin()->first); + bool terminated_correctly = false; + for (unsigned infinite_loop_guard = 1 + m_desired_mapping.size(); + infinite_loop_guard != 0; --infinite_loop_guard) { + const auto curr_v = this_cycle.back(); + const auto target_v = m_desired_mapping.at(curr_v); + TKET_ASSERT(m_desired_mapping.erase(curr_v) == 1); + if (target_v == this_cycle[0]) { + terminated_correctly = true; + break; + } + this_cycle.push_back(target_v); + } + TKET_ASSERT(terminated_correctly); + } + // Sort by cycle length, LONGEST cycles first. + // But, also want a "stable-like" sort: + // make a consistent choice across all platforms, + // if cycle lengths are equal, + // based only upon the vertex numbers. + m_sorted_cycles_indices.resize(next_cyc_index); + std::iota(m_sorted_cycles_indices.begin(), m_sorted_cycles_indices.end(), 0); + const auto& cycles = m_cycles; + + std::sort( + m_sorted_cycles_indices.begin(), m_sorted_cycles_indices.end(), + [cycles](unsigned ii, unsigned jj) { + const auto& cyc1 = cycles[ii]; + const auto& cyc2 = cycles[jj]; + return (cyc1.size() > cyc2.size()) || + // Using the raw vertex numbers is, of course, non-canonical, + // but necessary if we are to have stable results + // across ALL nonstable sorting algorithms + // on different platforms/compilers. + ((cyc1.size() == cyc2.size()) && cyc1[0] < cyc2[0]); + }); + + // Now we can set up the mapping. + m_result.new_to_old_vertices.clear(); + for (auto ii : m_sorted_cycles_indices) { + const auto& cyc = m_cycles[ii]; + TKET_ASSERT(!cyc.empty()); + TKET_ASSERT(cyc.size() <= 6); + for (size_t old_v : cyc) { + m_result.new_to_old_vertices.push_back(old_v); + } + } + TKET_ASSERT(m_result.new_to_old_vertices.size() <= 6); + m_result.old_to_new_vertices.clear(); + for (unsigned ii = 0; ii < m_result.new_to_old_vertices.size(); ++ii) { + m_result.old_to_new_vertices[m_result.new_to_old_vertices[ii]] = ii; + } + TKET_ASSERT( + m_result.new_to_old_vertices.size() == + m_result.old_to_new_vertices.size()); + + // And finally, the permutation hash. + m_result.permutation_hash = 0; + for (auto ii : m_sorted_cycles_indices) { + const auto& cyc = m_cycles[ii]; + if (cyc.size() == 1) { + break; + } + m_result.permutation_hash *= 10; + m_result.permutation_hash += cyc.size(); + } + return m_result; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp new file mode 100644 index 0000000000..c51c5a7db4 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp @@ -0,0 +1,97 @@ +#ifndef _TKET_TokenSwapping_TableLookup_CanonicalRelabelling_H_ +#define _TKET_TokenSwapping_TableLookup_CanonicalRelabelling_H_ +#include "../TSAUtils/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +// PERMUTATION HASH EXPLANATION: +// Certain permutations on the vertices [0,1,2,3,4,5] are represented by an +// unsigned value, the "permutation hash". In fact, ANY permutation on a list of +// ANY 6 distinct objects can be reduced to one of these by a suitable vertex +// relabelling, which is what this class is for. 
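+//
+// (Illustrative example, not from the original source: the arbitrary
+// mapping {10->12, 12->10, 3->7, 7->3} consists of two 2-cycles; after
+// relabelling the old vertices 3,7,10,12 as new vertices 0,1,2,3 it
+// becomes (01)(23), i.e. the partition 2+2, with permutation hash 22
+// in the notation explained below.)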
+// +// Note that not every permutation on [0,1,2,3,4,5] corresponds to a permutation +// hash (in fact, very few do); most of them still need relabelling, just as for +// arbitrary labels. +// +// The permutation hashes are done by taking a partition of 6, with parts in +// decreasing order, e.g. 6 = 3+2+1 = 4+2 = 3+3 = 2+2+1+1, etc. We remove all +// the 1 entries, and stick the digits together into a single decimal: +// +// 3+2+1 -> 32, 4+2 -> 42, 2+2+1+1 -> 22 +// +// Each digit represents the length of a slice of the 6 elements 012345: +// +// 32 -> (012)(34)(5), 42 -> (0123)(45), 22 -> (01)(23)(4)(5). +// +// The notation (abcd) represents a cyclic shift on the elements a,b,c,d. +// Thus a -> b -> c -> d -> a. +// +// EVERY permutation on 6 ARBITRARY distinct objects is equivalent to one of +// these, after suitable vertex relabelling. This follows because permutations +// can be decomposed into disjoint cycles. +// + +/** Given a permutation with arbitrary vertex labels, currently size <= 6, we + * want to relabel the vertices so that we can look up an isomorphic mapping in + * a table. This class gives one possible way. Still some scope for research and + * improvement here; we want to cut down the number of "isomorphic" copies as + * much as possible (whatever "isomorphic" means in this context) to make the + * lookup table fairly small. + */ +class CanonicalRelabelling { + public: + /** For looking up mappings in the table. */ + struct Result { + /** Will be empty if there are too many vertices. (Current limit is 6, + * although this may be updated in future). */ + VertexMapping old_to_new_vertices; + + /** Element[i], for new vertex i, is the old vertex number which corresponds + * to i. Empty if too many vertices. + */ + std::vector new_to_old_vertices; + + /** Set equal to zero if too many vertices. Any permutation on <= 6 vertices + * is assigned a number, to be looked up in the table. 0 is the identity + * permutation. */ + unsigned permutation_hash; + + /** Were there too many vertices in the mapping to look up in the table? */ + bool too_many_vertices; + + /** Was it the identity mapping? If so, no need to relabel OR look up in a + * table. */ + bool identity; + }; + + /** The returned Result object is stored internally. + * @param desired_mapping A (source vertex) -> (target vertex) permutation on + * arbitrary vertex labels. + * @return An object withe information such as (1) how to relabel vertices; + * (2) The permutation on NEW vertices, for looking up in a table. + */ + const Result& operator()(const VertexMapping& desired_mapping); + + CanonicalRelabelling(); + + private: + Result m_result; + + VertexMapping m_desired_mapping; + VertexMapping m_work_mapping; + + /** The relabelling/permutation hashing is all based upon decomposing an + * arbitrarily labelled permutation into disjoint cycles, then relabelling the + * vertices within the cycles in a reasonable way. + */ + std::vector> m_cycles; + + /** The indices in "m_cycles" after sorting appropriately. 
*/ + std::vector m_sorted_cycles_indices; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp new file mode 100644 index 0000000000..1b48856076 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -0,0 +1,124 @@ +#include "ExactMappingLookup.hpp" + +#include + +#include "../TSAUtils/GeneralFunctions.hpp" +#include "FilteredSwapSequences.hpp" +#include "SwapConversion.hpp" +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +const ExactMappingLookup::Result& ExactMappingLookup::operator()( + const VertexMapping& desired_mapping, const vector& edges, + unsigned max_number_of_swaps) { + m_result.success = false; + m_result.too_many_vertices = desired_mapping.size() > 6; + m_result.swaps.clear(); + if (m_result.too_many_vertices) { + return m_result; + } + return improve_upon_existing_result( + desired_mapping, edges, max_number_of_swaps); +} + +const ExactMappingLookup::Result& +ExactMappingLookup::improve_upon_existing_result( + const VertexMapping& desired_mapping, const vector& edges, + unsigned max_number_of_swaps) { + max_number_of_swaps = std::min(max_number_of_swaps, 16u); + const auto& relabelling = m_relabeller(desired_mapping); + + if (relabelling.identity) { + // This beats whatever was there before, + // whether or not it was successful. + m_result.success = true; + m_result.too_many_vertices = false; + m_result.swaps.clear(); + return m_result; + } + if (relabelling.too_many_vertices) { + // We cannot get a new result, so just return the existing one, whether or + // not it succeeded. + if (!m_result.success) { + m_result.too_many_vertices = true; + } + return m_result; + } + TKET_ASSERT(relabelling.permutation_hash != 0); + TKET_ASSERT( + relabelling.new_to_old_vertices.size() == + relabelling.old_to_new_vertices.size()); + TKET_ASSERT(relabelling.new_to_old_vertices.size() >= 2); + + fill_result_from_table(relabelling, edges, max_number_of_swaps); + return m_result; +} + +void ExactMappingLookup::fill_result_from_table( + const CanonicalRelabelling::Result& relabelling_result, + const vector& old_edges, unsigned max_number_of_swaps) { + if (m_result.success) { + if (m_result.swaps.empty()) { + return; + } + max_number_of_swaps = + std::min(max_number_of_swaps, m_result.swaps.size() - 1); + if (max_number_of_swaps == 0) { + return; + } + } else { + m_result.swaps.clear(); + } + SwapConversion::EdgesBitset new_edges_bitset = 0; + + for (auto old_edge : old_edges) { + const auto new_v1_opt = get_optional_value( + relabelling_result.old_to_new_vertices, old_edge.first); + if (!new_v1_opt) { + continue; + } + const auto new_v2_opt = get_optional_value( + relabelling_result.old_to_new_vertices, old_edge.second); + if (!new_v2_opt) { + continue; + } + const auto new_v1 = new_v1_opt.value(); + const auto new_v2 = new_v2_opt.value(); + TKET_ASSERT(new_v1 <= 5); + TKET_ASSERT(new_v2 <= 5); + new_edges_bitset |= SwapConversion::get_edges_bitset( + SwapConversion::get_hash_from_swap(get_swap(new_v1, new_v2))); + } + + const FilteredSwapSequences::SingleSequenceData table_result( + relabelling_result.permutation_hash, new_edges_bitset, + max_number_of_swaps); + + TKET_ASSERT(table_result.number_of_swaps > 0); + if (table_result.number_of_swaps > max_number_of_swaps) { + // No result in the table. 
+ return; + } + TKET_ASSERT(table_result.edges_bitset != 0); + TKET_ASSERT(table_result.swaps_code > 0); + + m_result.success = true; + m_result.swaps.clear(); + auto swaps_code_copy = table_result.swaps_code; + while (swaps_code_copy != 0) { + const auto& new_swap = + SwapConversion::get_swap_from_hash(swaps_code_copy & 0xF); + swaps_code_copy >>= 4; + m_result.swaps.push_back(get_swap( + relabelling_result.new_to_old_vertices.at(new_swap.first), + relabelling_result.new_to_old_vertices.at(new_swap.second))); + } + TKET_ASSERT(m_result.swaps.size() <= 16); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp new file mode 100644 index 0000000000..8ac9b84bef --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp @@ -0,0 +1,69 @@ + +#ifndef _TKET_TokenSwapping_TableLookup_ExactMappingLookup_H_ +#define _TKET_TokenSwapping_TableLookup_ExactMappingLookup_H_ + +#include "CanonicalRelabelling.hpp" + +namespace tket { +namespace tsa_internal { + +/** Given a raw vertex->vertex mapping which must be enacted exactly (no empty + * tokens), attempt to find an optimal or near-optimal result in a table, and + * handle all vertex back-and-forth relabelling. + */ +class ExactMappingLookup { + public: + /** If successful, "swaps" will contain a vector of swaps which performs the + * desired mapping. */ + struct Result { + std::vector swaps; + bool success; + bool too_many_vertices; + }; + + /** The Result object is stored internally. Tries to find a sequence of swaps + * in the table. + * @param desired_mapping A (source vertex) -> (target vertex) permutation. + * @param edges Edges which exist between the vertices (equivalently, the + * swaps which we are permitted to use). Edges with vertices not appearing in + * desired_mapping will simply be ignored. + * @param max_number_of_swaps Stop looking in the table if every possible + * sequence of swaps in the table which enacts the desired mapping exceeds + * this length (or doesn't exist at all). + */ + const Result& operator()( + const VertexMapping& desired_mapping, const std::vector& edges, + unsigned max_number_of_swaps = 16); + + /** Used for partial mapping lookups; like operator(), but does NOT erase the + * previous result. Overwrites with a new result if an improvement is found. + * @param desired_mapping A (source vertex) -> (target vertex) permutation. + * @param edges Edges which exist between the vertices. + * @param max_number_of_swaps Stop looking in the table once the swap + * sequences exceed this length. + */ + const Result& improve_upon_existing_result( + const VertexMapping& desired_mapping, const std::vector& edges, + unsigned max_number_of_swaps = 16); + + private: + Result m_result; + CanonicalRelabelling m_relabeller; + + /** Attempts to fill m_result, given the relabelling to use. + * If m_result already has a valid solution (i.e., "success" == true), + * only fills if the new solution has strictly fewer swaps. + * @param relabelling_result The result of relabelling, for lookup in the raw + * table. + * @param old_edges Edges which exist between the vertices before relabelling. + * @param max_number_of_swaps Stop looking once the swap sequences exceed this + * length. 
+ */ + void fill_result_from_table( + const CanonicalRelabelling::Result& relabelling_result, + const std::vector& old_edges, unsigned max_number_of_swaps); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp new file mode 100644 index 0000000000..305bb95886 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -0,0 +1,260 @@ +#include "FilteredSwapSequences.hpp" + +#include + +#include "../TSAUtils/GeneralFunctions.hpp" +#include "SwapSequenceTable.hpp" +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +/* +NOTE: the problem is: given a bitset, i.e. an unsigned int representing a set, +design a map-type data structure whose keys are unsigned integers representing +bitsets, and values are a collection of entries using that bitset (i.e., only +using swaps whose index in a global vector of allowed swaps has a "one" in the +appropriate position in the binary expansion of the bitset). + +We must be able to look up all entries whose key is a SUBSET of the given set. +(And then, search further through those values). + +We tried various things, e.g. sorting by key, using the fact that + +(X is a subset of Y) ==> (x <= y) + +where X,Y are subsets and x,y are the integers representing them; thus you can +do a kind of binary search. + +If you want the SMALLEST value for a given key, you can sort them also and do a +kind of double binary search. (Another crucial point: when searching between two +key ranges in a sorted VECTOR of keys, you can determine how many keys exist in +the range in O(log N) time, rather than O(N) time for a map). + +These fancy algorithms are all asymptotically much more efficient than the +obvious O(N) lookup, which just goes through EVERY key and checks if it's a +subset or not, then goes through every element. + +HOWEVER, experiments showed that the fancy algorithms are actually quite a bit +slower than the obvious algorithm for the table size we care about. + +*/ + +FilteredSwapSequences::SingleSequenceData::SingleSequenceData() + : edges_bitset(0), + swaps_code(0), + number_of_swaps(std::numeric_limits::max()) {} + +/* +If the entries are distributed "randomly" and fairly uniformly amongst the +bitset keys, i.e. given a bitset, look up all keys which are a subset of that, +then asymptotically using many bits in the keys is good. + +For our table sizes, experiments suggested that it's worth having 1 bit in each +bitset key (2 min for 1 bit vs. 2 min 20 sec for no bits in one test), rather +then no keys at all, BUT not worth more than 1 bit in each key. + +e.g., for 15 bits in each bitset, each of the 15 keys being one of the bits +(we have no empty keys - pointless trying to look up swap sequences if the graph +has no edges!), assume that an average lookup query contains 5 bits. Then 10/15 += 2/3 of the keys are disjoint from it, and so most of the keys immediately can +be ruled out. + +However, it's a balancing act: if you have too many keys, then the lists for +each key become so short then you're effectively almost doing a linear search +through all entries. +*/ + +void FilteredSwapSequences::initialise( + std::vector codes) { + // Can only initialise once. 
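+  // Sorting (besides letting duplicates be skipped below) means each
+  // per-key list is filled in increasing swaps_code order, which
+  // get_lookup_result relies on to terminate its inner loop early.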
+ TKET_ASSERT(m_internal_data.empty()); + std::sort(codes.begin(), codes.end()); + TKET_ASSERT(!codes.empty()); + TKET_ASSERT(codes[0] != 0); + TrimmedSingleSequenceData datum; + + for (size_t ii = 0; ii < codes.size(); ++ii) { + if (ii != 0 && codes[ii] == codes[ii - 1]) { + // Filter out duplicate entries. + continue; + } + datum.swaps_code = codes[ii]; + datum.edges_bitset = SwapConversion::get_edges_bitset(datum.swaps_code); + push_back(datum); + } +} + +void FilteredSwapSequences::push_back(TrimmedSingleSequenceData datum) { + auto bitset_copy = datum.edges_bitset; + TKET_ASSERT(bitset_copy != 0); + SwapConversion::EdgesBitset bit_to_use = 0; + + // We want to add to the smallest list, to keep the data balanced. + // Tests showed that this works well; the entries are distributed + // very close to uniformly amongst the 15 possible keys. + // + // This is maybe surprising, because you'd expect + // more bias: you'd expect, due to the relabelling scheme, the table to have + // swaps like (0,1), (0,2) much more frequently than higher-numbered + // vertices like (4,5). This may or may not be the case, but whatever + // the truth, there are still enough bits available overall to break + // the entries up well enough). + size_t list_size_to_use = std::numeric_limits::max(); + + while (bitset_copy != 0) { + const auto new_bit = get_rightmost_bit(bitset_copy); + // If the key does not exist, the newly created empty list will + // immediately be filled; so no key is wasted. (They're not wasted anyway, + // the table entries are very close to uniformly distributed + // amongst all 15 keys). + const auto list_size = m_internal_data[new_bit].size(); + + if (list_size < list_size_to_use) { + list_size_to_use = list_size; + bit_to_use = new_bit; + if (list_size == 0) { + break; + } + } + } + TKET_ASSERT(bit_to_use != 0); + m_internal_data[bit_to_use].push_back(datum); +} + +FilteredSwapSequences::SingleSequenceData +FilteredSwapSequences::get_lookup_result( + SwapConversion::EdgesBitset edges_bitset, unsigned max_num_swaps) const { + // NOTE: this algorithm is quite crude, BUT it's so simple that + // apparently clever algorithms, although asymptotically more efficient, + // appear to be slower. + // The clever algorithms seem only worth doing if the table becomes + // much larger, >> 100 codes for each bit at least. + + max_num_swaps = std::min(max_num_swaps, 16u); + + // Value 0xFFF...F will never occur, + // because this would be 16 consecutive equal swaps...! + const auto impossible_max_code = + std::numeric_limits::max(); + + // Stop as soon as the swaps code gets too big. + SwapConversion::SwapHash max_code; + if (max_num_swaps == 16) { + max_code = impossible_max_code; + } else { + max_code = 1; + max_code <<= (4 * max_num_swaps); + --max_code; + } + TrimmedSingleSequenceData best_datum; + best_datum.swaps_code = impossible_max_code; + + for (const auto& entry : m_internal_data) { + if (entry.first > edges_bitset) { + // The swaps used by a sequence must be a SUBSET of the allowable edges. + // Therefore, the swaps bitset must be <= the edges bitset. + // Of course, it's a MAP, so the swaps bitsets are already in increasing + // order. + break; + } + if ((entry.first & edges_bitset) != entry.first) { + // Every swap sequence in this entry contains ALL of the given edges + // in the bitset key (as well as others), and thus it MUST be a subset + // of the given edges_bitset, otherwise the entire entry + // can be skipped. 
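+      // (Each key is a single bit, chosen in push_back; if that bit is not
+      // in edges_bitset, every sequence stored under this key uses a
+      // forbidden swap, so the whole entry is skipped.)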
+ continue; + } + const auto& list = entry.second; + for (const auto& single_entry : list) { + if (single_entry.swaps_code > max_code || + single_entry.swaps_code >= best_datum.swaps_code) { + // Because they're sorted by code value, + // all subsequent entries will be too big also. + break; + } + if ((single_entry.edges_bitset & edges_bitset) != + single_entry.edges_bitset) { + // The EXACT set of edges used must be a subset of edges_bitset, + // otherwise it's unsuitable - it uses a swap not allowed. + continue; + } + best_datum = single_entry; + } + } + + SingleSequenceData result; + if (best_datum.swaps_code < impossible_max_code) { + // We actually got a result. + result.edges_bitset = best_datum.edges_bitset; + result.swaps_code = best_datum.swaps_code; + result.number_of_swaps = + SwapConversion::get_number_of_swaps(result.swaps_code); + } + return result; +} + +size_t FilteredSwapSequences::get_total_number_of_entries() const { + size_t total = 0; + for (const auto& entry : m_internal_data) { + total += entry.second.size(); + } + return total; +} + +// Convert the raw SwapSequenceTable object into +// FilteredSwapSequences-compatible data. The key is the permutation hash; the +// value is the lookup object which can find solutions to given problems. +static std::map +construct_and_return_full_table() { + std::map result; + const auto raw_table = SwapSequenceTable::get_table(); + for (const auto& entry : raw_table) { + // The simplest nontrivial permutation arises from a single swap (a,b), + // which under the canonical relabelling is converted to (01), + // which has hash 2. + TKET_ASSERT(entry.first >= 2); + // The largest possible hash comes from (01)(23)(45). + TKET_ASSERT(entry.first <= 222); + result[entry.first].initialise(entry.second); + } + return result; +} + +static const std::map& get_full_table() { + static const auto full_table(construct_and_return_full_table()); + return full_table; +} + +FilteredSwapSequences::SingleSequenceData::SingleSequenceData( + unsigned permutation_hash, SwapConversion::EdgesBitset edges_bitset, + unsigned max_number_of_swaps) + : SingleSequenceData() { + if (permutation_hash == 0) { + // The identity mapping, always possible. + number_of_swaps = 0; + return; + } + if (edges_bitset == 0) { + // No swaps at all! This CAN happen...it just means that + // we haven't seen enough vertices to connect up the given ones; + // all solutions involve swaps using other vertices not yet seen + // (i.e., not in this subgraph). + // But it's not the identity, therefore it's impossible. + return; + } + + const auto& table = get_full_table(); + const auto citer = table.find(permutation_hash); + if (citer == table.cend()) { + // No result in the table. + return; + } + *this = citer->second.get_lookup_result(edges_bitset, max_number_of_swaps); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp new file mode 100644 index 0000000000..bc8ff6addc --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp @@ -0,0 +1,126 @@ +#ifndef _TKET_TokenSwapping_TableLookup_FilteredSwapSequences_H_ +#define _TKET_TokenSwapping_TableLookup_FilteredSwapSequences_H_ + +#include +#include + +#include "SwapConversion.hpp" + +namespace tket { +namespace tsa_internal { + +/** Takes a raw list of integers, where each integer represents a swap sequence + * on the vertices {0,1,2,...,5} giving the same vertex permutation. 
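+ * (For example, the raw table entries 0x1 and 0x262 both enact the single
+ * transposition (01): 0x1 is the lone swap (0,1), while 0x262 is the
+ * three-swap sequence (0,2),(1,2),(0,2), an alternative for when the edge
+ * (0,1) is not available.)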
+ * Given such data, FilteredSwapSequences knows how to index and store it + * somehow (exactly how is an implementation detail - it can be thought of + * as a "database of swap sequences"), + * so that results can be looked up again, when given the edges bitset + * (i.e., edges existing in the graph, i.e. vertex swaps we are allowed to + * perform). This is for data close to the raw table data; it knows nothing + * about vertex relabelling, which of course is a crucial component. + * + * The main precomputed table of data is also accessed here, via the + * SingleSequenceData constructor. + * + * Note that the raw table contains several lists of integers, + * each one denoting different swap sequences enacting a single permutation, but + * with different edges; whereas this class only stores a single list in + * searchable form. + */ +class FilteredSwapSequences { + public: + /** A result which comes from the "raw" table data in SwapSequenceTable, with + * minimal processing. */ + struct SingleSequenceData { + /** The edges (i.e., swaps) actually used (or 0 if none are used). [This + * could be computed from swaps_code but there is no need to recompute each + * time. */ + SwapConversion::EdgesBitset edges_bitset; + + /** An integer encoding a sequence of swaps. 0 means no swaps. */ + SwapConversion::SwapHash swaps_code; + + /** The number of swaps used. Set to max() if no valid sequence was found + * (e.g., if not present in the table). */ + unsigned number_of_swaps; + + /** Initialised with "null" values automatically, i.e. number_of_swaps + * taking value max(). */ + SingleSequenceData(); + + /** This is how we access the fixed data in the large const static global + * table. This constructor looks up the shortest sequence of swaps enacting + * the given permutation, and fills the entries. + * @param permutation_hash The hash of the desired permutation of + * {0,1,2,...,5}, as used to look up results in the table (after + * relabelling). See CanonicalRelabelling for explanation. + * @param edges_bitset The collection of edges on {0,1,2,...,5} which + * actually exist in the graph (i.e., the swaps which are allowed). + * @param max_number_of_swaps Do not return any solutions with more swaps + * than this: useful speedup to allow early termination. + */ + SingleSequenceData( + unsigned permutation_hash, SwapConversion::EdgesBitset edges_bitset, + unsigned max_number_of_swaps); + }; + + /** Index and process the raw data to allow later retrieval. Can only be done + * once (a security measure to avoid accidentally reconstructing large tables + * multiple times). The codes don't need to be sorted OR deduplicated. + * Duplicate, redundant and suboptimal data IS tolerated, as long as it is + * correct. Such data could lead to slowdowns from a larger table, BUT will + * not affect the actual results (i.e., if the data contains some entries + * inferior to others, then the inferior results will automatically never be + * returned, because the superior ones will always be found). + * @param codes The raw list of integers stored in the original table + */ + void initialise(std::vector codes); + + /** Search for the entry with fewest swaps whose edges_bitset is a + * subset of the given edges_bitset (so that it only uses allowed swaps). + * If there is no suitable sequence in the table, returns a null object. + * Stop searching early if it finds that all entries have too many swaps. + * @param allowed_swaps The swaps which can occur (in other words, the + * existing edges in the graph). 
+ * @param max_num_swaps Don't return any entries with more than this many + * swaps. + * @return An entry with the fewest swaps, or a null entry if none exists. + */ + SingleSequenceData get_lookup_result( + SwapConversion::EdgesBitset allowed_swaps, unsigned max_num_swaps) const; + + /** For testing, just count how many entries we've stored. + * @return The total number of encoded swap sequences stored internally. + */ + size_t get_total_number_of_entries() const; + + private: + /** We recalculate the number of swaps each time, rather than storing. + * We just sort by swaps_code, since this respects numbers of swaps. + * I.e., if S1, S2 are swap sequences, and encoding(S(j)) is an integer, then + * length(S1) < length(S2) => encoding(S1) < encoding(S2). + * Thus, minimising encoding(S) will also force minimising length(S). + */ + struct TrimmedSingleSequenceData { + SwapConversion::EdgesBitset edges_bitset; + SwapConversion::SwapHash swaps_code; + }; + + /** Key: a subset of bits in edges_bitset. + * Value: codes containing those bits in their edges bitset, sorted in + * increasing order. No entry occurs multiple times, but the values are spread + * out amongst the keys to balance the data better and give faster lookup. + */ + std::map> + m_internal_data; + + /** Must be pushed back in increasing order of swaps_code. Processes and + * stores the result for later searchability. + * @param datum Information about a single raw entry from the table. + */ + void push_back(TrimmedSingleSequenceData datum); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp new file mode 100644 index 0000000000..f4e86f8aa2 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -0,0 +1,78 @@ +#include "PartialMappingLookup.hpp" + +#include + +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +const ExactMappingLookup::Result& PartialMappingLookup::operator()( + const VertexMapping& desired_mapping, const vector& edges, + const std::set& vertices_with_tokens_at_start, + unsigned max_number_of_swaps) { + const auto& exact_mapping_result = + m_exact_mapping_lookup(desired_mapping, edges, max_number_of_swaps); + + if (exact_mapping_result.success && exact_mapping_result.swaps.empty()) { + return exact_mapping_result; + } + + // Are there any empty vertices? + m_empty_source_vertices.clear(); + m_empty_target_vertices.clear(); + for (const auto& entry : desired_mapping) { + if (vertices_with_tokens_at_start.count(entry.first) == 0) { + m_empty_source_vertices.push_back(entry.first); + m_empty_target_vertices.push_back(entry.second); + } + } + if (m_empty_source_vertices.size() <= 1) { + // Only an exact lookup is needed (or possible). + return exact_mapping_result; + } + + // There are some empty vertices at the start. + // These END UP at empty target vertices + // (which, of course, might be completely different!) + // For next_permutation, let's permute the empty SOURCE vertices. + // They are already sorted, thus already at the first permutation + // in the ordering, because they came from the keys of desired_mapping. 
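+  // Note: this next_permutation call also ADVANCES to the second
+  // permutation; the first one is just desired_mapping itself, already
+  // tried by the exact lookup above. With at least two empty vertices a
+  // further permutation always exists, hence the assert on the return value.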
+ TKET_ASSERT(std::next_permutation( + m_empty_source_vertices.begin(), m_empty_source_vertices.end())); + m_altered_mapping = desired_mapping; + + for (unsigned perm_count = 0;;) { + for (unsigned ii = 0; ii < m_empty_source_vertices.size(); ++ii) { + m_altered_mapping[m_empty_source_vertices[ii]] = + m_empty_target_vertices[ii]; + } + const auto& exact_map_result_for_permuted_vertices = + m_exact_mapping_lookup.improve_upon_existing_result( + m_altered_mapping, edges, max_number_of_swaps); + + if (exact_map_result_for_permuted_vertices.success && + exact_map_result_for_permuted_vertices.swaps.empty()) { + return exact_map_result_for_permuted_vertices; + } + ++perm_count; + if (perm_count >= m_parameters.max_number_of_empty_vertex_permutations || + !std::next_permutation( + m_empty_source_vertices.begin(), m_empty_source_vertices.end())) { + return exact_map_result_for_permuted_vertices; + } + } +} + +PartialMappingLookup::Parameters::Parameters() + : max_number_of_empty_vertex_permutations(10) {} + +PartialMappingLookup::Parameters& PartialMappingLookup::get_parameters() { + return m_parameters; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp new file mode 100644 index 0000000000..0ff0f3f8c6 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp @@ -0,0 +1,64 @@ + +#ifndef _TKET_TokenSwapping_TableLookup_PartialMappingLookup_H_ +#define _TKET_TokenSwapping_TableLookup_PartialMappingLookup_H_ + +#include +#include +#include + +#include "ExactMappingLookup.hpp" + +namespace tket { +namespace tsa_internal { + +/** This is the same as ExactMappingLookup, except that we allow vertices not to + * have tokens. It works simply by going through possible permutations of empty + * vertices and doing an exact permutation lookup (limiting the number of + * permutations to avoid excessive slowdown). + */ +class PartialMappingLookup { + public: + /** Parameters controlling the partial mapping lookup. Sensible defaults, + * found by experimentation. */ + struct Parameters { + /** To speed up, don't try all permutations if there are many empty + * vertices; limit them to this number. */ + unsigned max_number_of_empty_vertex_permutations; + + Parameters(); + }; + + /** If desired, change some internal parameters. + * @return Internal parameters object, to be changed if desired. + */ + Parameters& get_parameters(); + + /** The result is stored internally. The same format as ExactMappingLookup. + * @param desired_mapping A (source vertex) -> (target vertex) permutation. + * @param edges Edges which exist between the vertices (equivalently, the + * swaps which we are permitted to use). Edges with vertices not appearing in + * desired_mapping will simply be ignored. + * @param vertices_with_tokens_at_start Every vertex mentioned within + * desired_mapping which has a token, just BEFORE the swaps are performed to + * enact the desired_mapping, must be mentioned here. Other vertices not + * mentioned in the mapping are allowed; they will simply be ignored. + * @param max_number_of_swaps Stop looking if every sequence of swaps in the + * table which enacts the desired mapping exceeds this length (or doesn't + * exist at all). 
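+   *
+   * Illustrative example (hypothetical vertex names): if mapped vertices
+   * u, v carry no tokens and have targets x, y respectively, the lookup is
+   * also attempted with u->y, v->x, since it does not matter which empty
+   * target an empty vertex ends up at.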
+ */ + const ExactMappingLookup::Result& operator()( + const VertexMapping& desired_mapping, const std::vector& edges, + const std::set& vertices_with_tokens_at_start, + unsigned max_number_of_swaps = 16); + + private: + Parameters m_parameters; + ExactMappingLookup m_exact_mapping_lookup; + std::vector m_empty_source_vertices; + std::vector m_empty_target_vertices; + VertexMapping m_altered_mapping; +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp new file mode 100644 index 0000000000..382ae65f4e --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -0,0 +1,76 @@ +#include "SwapConversion.hpp" + +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +static vector get_swaps_fixed_vector() { + vector swaps; + for (unsigned ii = 0; ii < 6; ++ii) { + for (unsigned jj = ii + 1; jj < 6; ++jj) { + swaps.push_back(get_swap(ii, jj)); + } + } + TKET_ASSERT(swaps.size() == 15); + return swaps; +} + +static const vector& get_swaps_global() { + static const auto swaps_vect(get_swaps_fixed_vector()); + return swaps_vect; +} + +const Swap& SwapConversion::get_swap_from_hash(SwapHash x) { + return get_swaps_global().at(x - 1); +} + +static std::map get_swap_to_hash() { + const auto swaps = get_swaps_fixed_vector(); + std::map map; + for (unsigned ii = 0; ii < swaps.size(); ++ii) { + map[swaps[ii]] = ii + 1; + } + return map; +} + +static const std::map& +get_swap_to_hash_global() { + static const auto map(get_swap_to_hash()); + return map; +} + +SwapConversion::SwapHash SwapConversion::get_hash_from_swap(const Swap& swap) { + return get_swap_to_hash_global().at(swap); +} + +unsigned SwapConversion::get_number_of_swaps( + SwapConversion::SwapHash swaps_code) { + unsigned num_swaps = 0; + while (swaps_code != 0) { + ++num_swaps; + const auto swap_hash = swaps_code & 0xF; + swaps_code >>= 4; + TKET_ASSERT(swap_hash > 0); + TKET_ASSERT(swap_hash <= 15); + } + return num_swaps; +} + +SwapConversion::EdgesBitset SwapConversion::get_edges_bitset( + SwapHash swaps_code) { + EdgesBitset edges_bitset = 0; + while (swaps_code != 0) { + const auto swap_hash = swaps_code & 0xF; + TKET_ASSERT(swap_hash > 0); + edges_bitset |= (1u << (swap_hash - 1)); + swaps_code >>= 4; + } + return edges_bitset; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp new file mode 100644 index 0000000000..38b8424821 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp @@ -0,0 +1,89 @@ +#ifndef _TKET_TokenSwapping_TableLookup_SwapConversion_H_ +#define _TKET_TokenSwapping_TableLookup_SwapConversion_H_ + +#include + +#include "../TSAUtils/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/* +NOTE on ENCODING: with 6 vertices, there are 15 possible edges or swaps. +Thus, we can encode a single swap by a number in the range 0-15 (using 0 to +denote "no swap"). + +This fits into 4 bits exactly. + +Thus, a single 64-bit unsigned int can store any swap sequence of length <= 16. +We also have the added benefit that ints written in hexadecimal are easier for a +human to read, since each hex digit 0-9 or A-F corresponds to a single swap. + +An obvious optimisation is that adjacent swaps should be different; +and also, blocks of four zeros cannot occur within the encoding. 
+However, this would still only reduce the total number to about 30%, +so we'd still need 62 or 63 bits to represent all sequences of length <= 16. +So it's not worth trying fancy encodings to store more possible sequences in +fewer bits, without a good theoretical breakthrough to come up with a really +good way to encode and search through only optimal or "near optimal" sequences. + +If we desire in future to increase the number of vertices, we'd have to use at +least 5 bits per swap, so could only fit sequences of length <= 12 in a 64-bit +int. Of course, (8*7)/2 = 28 < 31, so we could store swaps on <= 8 vertices +instead of 6. + +*/ + +// Generally no checks on the input values, it's assumed that the caller +// knows how the table encoding works. +// The possible swaps (01), (02), (03), ..., (45) on vertices {0,1,2,3,4,5} +// are listed in a global vector, so with values 0,1,...,14. +// Adding 1 to the index gives possible values 1,2,...,15 for the swaps, +// and 0 means no swap. Thus a sequence of swaps is encoded by storing the bits +// in a uint, with first swap at the least significant bits, and so on with +// leftward shifts by 4 bits each time. + +struct SwapConversion { + /** Encodes a sequence of <=16 swaps, each swap being one of + * the 15 possible swaps on vertices {0,1,2,3,4,5}, and hence encoded by 4 + * bits. Zero represents the empty sequence. */ + typedef std::uint64_t SwapHash; + + /** Encodes a set of swaps, each one taken from the 15 possibilities. With + * each swap given a numerical value from 1 to 15, we simply shift 1u by that + * amount (minus one), and OR them together. Thus, when looking up in a table, + * we only allow swap sequences whose edge bitset is a SUBSET of a given edges + * bitset (corresponding to the edges in the graph, i.e. allowed swaps). + */ + typedef std::uint_fast16_t EdgesBitset; + + /** Given a valid number x, return the actual swap on vertices {0,1,2,3,4,5} + * which it represents. + * @param x A code number representing a single swap. + * @return A single swap on vertices {0,1,2,3,4,5}. + */ + static const Swap& get_swap_from_hash(SwapHash x); + + /** The opposite of get_swap_from_hash. + * @param swap A swap on {0,1,2,3,4,5}. (Must be in standard order, i.e. (i,j) + * with 0 <= i < j <= 5). + * @return A number 1-15 which encodes that swap in the table. + */ + static SwapHash get_hash_from_swap(const Swap& swap); + + /** Converting swaps to bitsets, which swaps are used in the code? + * @param swaps_code An integer representing a sequence of swaps. + * @return The set of swaps used in the sequence, encoded as a binary number. + */ + static EdgesBitset get_edges_bitset(SwapHash swaps_code); + + /** The number of swaps in a sequence. + * @param swaps_code An integer representing a sequence of swaps. + * @return The length of the swap sequence. 
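+   *
+   * Illustrative example: the code 0xA1 encodes the swap (0,1) (nibble 0x1,
+   * least significant, performed first) followed by (2,3) (nibble 0xA), so
+   * get_number_of_swaps(0xA1) == 2 and get_edges_bitset(0xA1) == 0x201.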
+ */ + static unsigned get_number_of_swaps(SwapHash swaps_code); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp new file mode 100644 index 0000000000..197def52d0 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -0,0 +1,162 @@ +#include "SwapListSegmentOptimiser.hpp" + +#include +#include +#include + +#include "../../Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +const SwapListSegmentOptimiser::Output& +SwapListSegmentOptimiser::optimise_segment( + SwapID initial_id, const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list) { + m_best_optimised_swaps.clear(); + + // Nonzero if and only if a valid sequence of swaps was stored. + m_output.initial_segment_size = 0; + + // If the mapping has too many vertices, it MAY happen that + // adding more swaps REDUCES the number of vertices + // (since, some vertices may move back to their original positions, + // and hence be "ignored"). Thus, we ALLOW the lookup to fail a few times + // due to too many vertices before we give up. + const int max_consecutive_too_many_vertices = 5; + int too_many_vertices_count = max_consecutive_too_many_vertices; + + VertexMapping current_map; + { + const auto& initial_swap = swap_list.at(initial_id); + current_map[initial_swap.first] = initial_swap.second; + current_map[initial_swap.second] = initial_swap.first; + } + size_t current_number_of_swaps = 1; + VertexMapping current_map_copy; + for (auto next_id_opt = swap_list.next(initial_id);;) { + bool too_many_vertices = false; + + // As we keep adding swaps to a sequence and updating the resultant + // target->source vertex mapping, should we look up EVERY mapping in the + // table, or is it enough to do so only when the map increases in size, etc. + // etc.? Desperately need some theory here! We look up almost EVERYTHING, so + // table lookup is one possible slowdown; reducing unnecessary lookups is + // worthwhile. + // TODO: think of some theory, and experiment! + bool attempt_to_optimise = current_map.size() >= 3; + if (!attempt_to_optimise && !next_id_opt) { + // Because it's the FINAL segment, optimise it whatever we do. + attempt_to_optimise = true; + } + if (attempt_to_optimise) { + // We're going to attempt to optimise. + current_map_copy = current_map; + const auto& resize_result = map_resizing.resize_mapping(current_map); + if (resize_result.success) { + const auto& lookup_result = m_mapping_lookup( + current_map, resize_result.edges, vertices_with_tokens_at_start, + current_number_of_swaps); + + if (lookup_result.success) { + // We've got a new result from the table; do we store it? + bool should_store = m_output.initial_segment_size == 0; + if (!should_store) { + // Something IS stored, but is our new solution better? 
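+            // We compare the DECREASE in length rather than the raw lengths,
+            // since the two candidates may cover initial segments of
+            // different sizes; keep whichever replacement removes more swaps.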
+ TKET_ASSERT( + m_output.initial_segment_size >= m_best_optimised_swaps.size()); + const size_t current_decrease = + m_output.initial_segment_size - m_best_optimised_swaps.size(); + TKET_ASSERT(current_number_of_swaps >= lookup_result.swaps.size()); + const size_t new_decrease = + current_number_of_swaps - lookup_result.swaps.size(); + should_store = new_decrease > current_decrease; + } + if (should_store) { + m_output.initial_segment_size = current_number_of_swaps; + m_best_optimised_swaps = lookup_result.swaps; + } + } else { + if (lookup_result.too_many_vertices) { + too_many_vertices = true; + } + } + } else { + // We couldn't resize the mapping, so there must be too many vertices. + too_many_vertices = true; + // Also, the vertex mapping may be corrupted, so restore it + current_map = current_map_copy; + } + } + + if (too_many_vertices) { + --too_many_vertices_count; + if (too_many_vertices_count <= 0) { + break; + } + } else { + too_many_vertices_count = max_consecutive_too_many_vertices; + } + + // Now add a swap. + if (next_id_opt) { + const auto id = next_id_opt.value(); + const Swap swap = swap_list.at(id); + add_swap(current_map, swap); + ++current_number_of_swaps; + next_id_opt = swap_list.next(id); + } else { + // We've reached the end! + break; + } + } + fill_final_output_and_swaplist(initial_id, swap_list); + return m_output; +} + +void SwapListSegmentOptimiser::fill_final_output_and_swaplist( + SwapID initial_id, SwapList& swap_list) { + if (m_output.initial_segment_size == 0) { + // No improvement was found. + m_output.final_segment_size = 0; + m_output.new_segment_last_id = {}; + return; + } + m_output.final_segment_size = m_best_optimised_swaps.size(); + TKET_ASSERT(m_output.final_segment_size <= m_output.initial_segment_size); + const auto initial_size = swap_list.size(); + + if (m_best_optimised_swaps.empty()) { + swap_list.erase_interval(initial_id, m_output.initial_segment_size); + m_output.new_segment_last_id = {}; + } else { + const auto overwrite_result = swap_list.overwrite_interval( + initial_id, m_best_optimised_swaps.cbegin(), + m_best_optimised_swaps.cend()); + + TKET_ASSERT( + overwrite_result.number_of_overwritten_elements == + m_best_optimised_swaps.size()); + m_output.new_segment_last_id = + overwrite_result.final_overwritten_element_id; + + const size_t remaining_elements_to_erase = + m_output.initial_segment_size - m_output.final_segment_size; + + const auto next_id_opt = + swap_list.next(overwrite_result.final_overwritten_element_id); + if (next_id_opt) { + swap_list.erase_interval( + next_id_opt.value(), remaining_elements_to_erase); + } + } + TKET_ASSERT( + swap_list.size() + m_output.initial_segment_size == + initial_size + m_output.final_segment_size); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp new file mode 100644 index 0000000000..41d9e606b9 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp @@ -0,0 +1,85 @@ + +#ifndef _TKET_TokenSwapping_TableLookup_SwapListSegmentOptimiser_H_ +#define _TKET_TokenSwapping_TableLookup_SwapListSegmentOptimiser_H_ + +#include +#include +#include + +#include "../TSAUtils/SwapFunctions.hpp" +#include "PartialMappingLookup.hpp" +#include "VertexMapResizing.hpp" + +namespace tket { +namespace tsa_internal { + +/** Given a swap list and a start point in the list, uses the lookup table + * to reduce an interval of swaps, replacing them 
in-place by a shorter sequence + * with the same end-to-end vertex mapping (although source->target mappings may + * change for empty source vertices, i.e. those without a token at the + * beginning). + */ +class SwapListSegmentOptimiser { + public: + struct Output { + /** The length of the segment that was replaced. + * Of course, this will be zero if no optimisation takes place. + */ + size_t initial_segment_size; + + /** The length of the segment after replacement. Always <= + * initial_segment_size. */ + size_t final_segment_size; + + /** If we did replace a segment with a shorter one, give the ID of the last + * swap of the segment. It might be null because the new segment might be + * empty. + */ + std::optional new_segment_last_id; + }; + + /** Starting at the given ID, which must be valid, move forward to examine an + * interval of swaps, and try to replace it with a shorter sequence looked up + * in the table. It MAY replace a segment with a different one of equal + * length; optimisation has probably already taken place, and couldn't break + * it up any further. If the table suggests a different but still valid + * interval, it MAY afford further opportunities for optimisation even if it's + * of the same length, so we might as well splice in the new segment. + * @param initial_id The ID within the swap list of the first swap which may + * be replaced, where we begin optimisation. + * @param vertices_with_tokens_at_start Just before the swap at initial_id is + * performed, which vertices have tokens on them? Extra unused vertices are + * allowed (but are helpful, since they may be added into the new sequence to + * reduce length). + * @param map_resizing An object to add/remove vertices from the mapping, with + * knowledge of edges in the graph (not just those involved in the swap list). + * @param swap_list The sequence of swaps to be reduced, in-place. + * @return An object stored internally, with information about the segment + * replacement/reduction (if any). + */ + const Output& optimise_segment( + SwapID initial_id, const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list); + + private: + Output m_output; + PartialMappingLookup m_mapping_lookup; + + // Naively, a greedy-type way to optimise is to + // reduce the SHORTEST sequence possible, by the LARGEST amount. + // This may not always be optimal, but should be OK. + std::vector m_best_optimised_swaps; + + /** Once m_output.initial_segment_size and m_best_optimised_swaps have been + * filled, fill in the rest of the data in m_output and make the swap + * replacements in swap_list. + * @param initial_id The ID within the swap list of the first swap which may + * be replaced, where we begin optimisation. + * @param swap_list The sequence of swaps to be reduced, in-place. 
+ */ + void fill_final_output_and_swaplist(SwapID initial_id, SwapList& swap_list); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp new file mode 100644 index 0000000000..5b1de153f9 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -0,0 +1,223 @@ +#include "SwapListTableOptimiser.hpp" + +#include +#include +#include + +#include "../TSAUtils/DebugFunctions.hpp" +#include "Utils/Assert.hpp" + +; + +namespace tket { +namespace tsa_internal { + +enum class EmptySwapCheckResult { + NOT_EMPTY, + CONTINUE_AFTER_ERASURE, + TERMINATE_AFTER_ERASURE +}; + +// current_id is KNOWN to be valid. +// vertices_with_tokens is correct just BEFORE performing the swap. +// If the swap is empty, erase it and update current_id (to the next swap). +static EmptySwapCheckResult check_for_empty_swap( + const std::set& vertices_with_tokens, SwapID& current_id, + SwapList& swap_list) { + const auto swap = swap_list.at(current_id); + if (vertices_with_tokens.count(swap.first) != 0 || + vertices_with_tokens.count(swap.second) != 0) { + return EmptySwapCheckResult::NOT_EMPTY; + } + const auto next_id_opt = swap_list.next(current_id); + swap_list.erase(current_id); + if (!next_id_opt) { + return EmptySwapCheckResult::TERMINATE_AFTER_ERASURE; + } + current_id = next_id_opt.value(); + return EmptySwapCheckResult::CONTINUE_AFTER_ERASURE; +} + +// current_id is KNOWN to be valid. +// vertices_with_tokens is correct just BEFORE performing the swap. +// Keep erasing empty swaps and updating current_id +// until EITHER we hit a nonempty swap, OR we run out of swaps, +// and thus return false. +static bool erase_empty_swaps_interval( + const std::set& vertices_with_tokens, SwapID& current_id, + SwapList& swap_list) { + for (auto infinite_loop_guard = 1 + swap_list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + switch (check_for_empty_swap(vertices_with_tokens, current_id, swap_list)) { + case EmptySwapCheckResult::CONTINUE_AFTER_ERASURE: + // Maybe more to erase! + break; + case EmptySwapCheckResult::NOT_EMPTY: + return true; + case EmptySwapCheckResult::TERMINATE_AFTER_ERASURE: + return false; + default: + TKET_ASSERT(!"unknown EmptySwapCheckResult enum"); + break; + } + } + // Should never get here! + TKET_ASSERT(!"erase_empty_swaps_interval falied to terminate"); + return false; +} + +// current_id is KNOWN to be valid and nonempty. +// vertices_with_tokens is correct just BEFORE we perform the current swap. +// Perform the swap (i.e., updating vertices_with_tokens), +// and advance current_id to the next swap. +static bool perform_current_nonempty_swap( + std::set& vertices_with_tokens, SwapID& current_id, + const SwapList& swap_list) { + const auto swap = swap_list.at(current_id); + + if (vertices_with_tokens.count(swap.first) == 0) { + // No empty swaps! + TKET_ASSERT(vertices_with_tokens.count(swap.second) != 0); + // Second has a token, first doesn't. + TKET_ASSERT(vertices_with_tokens.insert(swap.first).second); + TKET_ASSERT(vertices_with_tokens.erase(swap.second) == 1); + } else { + // First has a token. + if (vertices_with_tokens.count(swap.second) == 0) { + // Second has no token. 
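+      // Move the token from the first vertex to the second. (If both
+      // vertices hold tokens, the occupied set is unchanged, which is why
+      // there is no further branch.)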
+ TKET_ASSERT(vertices_with_tokens.erase(swap.first) == 1); + TKET_ASSERT(vertices_with_tokens.insert(swap.second).second); + } + } + + const auto next_id_opt = swap_list.next(current_id); + if (!next_id_opt) { + return false; + } + current_id = next_id_opt.value(); + return true; +} + +void SwapListTableOptimiser::optimise( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser) { + if (vertices_with_tokens_at_start.empty()) { + swap_list.clear(); + return; + } + if (swap_list.empty()) { + return; + } + + // Because we'll go in both directions, we need to know + // which tokens exist at the END of the mapping. + auto vertices_with_tokens_at_end = vertices_with_tokens_at_start; + { + // Already checked to be nonempty. + auto current_id = swap_list.front_id().value(); + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + swap_list.size(); + infinite_loop_guard > 0; --infinite_loop_guard) { + if (!erase_empty_swaps_interval( + vertices_with_tokens_at_end, current_id, swap_list)) { + terminated_correctly = true; + break; + } + if (!perform_current_nonempty_swap( + vertices_with_tokens_at_end, current_id, swap_list)) { + terminated_correctly = true; + break; + } + } + TKET_ASSERT(terminated_correctly); + if (swap_list.size() <= 1) { + return; + } + } + // Now begin the forward/backward loop. + for (auto infinite_loop_guard = 1 + swap_list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = swap_list.size(); + optimise_in_forward_direction( + vertices_with_tokens_at_start, map_resizing, swap_list, + swap_list_optimiser); + + swap_list.reverse(); + optimise_in_forward_direction( + vertices_with_tokens_at_end, map_resizing, swap_list, + swap_list_optimiser); + + // Must reverse again to get back to start! + swap_list.reverse(); + const auto new_size = swap_list.size(); + TKET_ASSERT(new_size <= old_size); + if (new_size == old_size) { + return; + } + } + TKET_ASSERT(!"SwapListTableOptimiser::optimise"); +} + +void SwapListTableOptimiser::optimise_in_forward_direction( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser) { + swap_list_optimiser.optimise_pass_with_frontward_travel(swap_list); + + m_segment_optimiser.optimise_segment( + swap_list.front_id().value(), vertices_with_tokens_at_start, map_resizing, + swap_list); + + if (swap_list.size() <= 1) { + return; + } + // Will always remain valid. We perform this swap and then optimise + // starting from the next one. + auto current_id = swap_list.front_id().value(); + auto vertices_with_tokens = vertices_with_tokens_at_start; + + for (size_t infinite_loop_guard = swap_list.size(); infinite_loop_guard != 0; + --infinite_loop_guard) { + if (!erase_empty_swaps_interval( + vertices_with_tokens, current_id, swap_list)) { + return; + } + // We now have a valid ID with nonempty swap. + if (!perform_current_nonempty_swap( + vertices_with_tokens, current_id, swap_list)) { + return; + } + + // NOW we want to optimise from this ID. + // However, we must be careful; maybe it will be erased, so we have + // to get the PREVIOUS and recover from there. 
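+    // (The segment optimiser only erases or overwrites swaps from
+    // current_id onwards, so the previous ID, if there is one, stays valid.)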
+ const auto previous_id_opt = swap_list.previous(current_id); + + m_segment_optimiser.optimise_segment( + current_id, vertices_with_tokens, map_resizing, swap_list); + + // We now want to set "current_id" to the first swap of + // the newly optimised segment (if any) - which may of course + // be unchanged, changed, or empty. + + // If there was no previous ID, we must have been at the front + // just before we optimised. + auto current_id_opt = swap_list.front_id(); + if (previous_id_opt) { + // There WAS a previous ID, so we CAN move onto the next. + current_id_opt = swap_list.next(previous_id_opt.value()); + } + if (!current_id_opt) { + return; + } + current_id = current_id_opt.value(); + } +} + +SwapListSegmentOptimiser& SwapListTableOptimiser::get_segment_optimiser() { + return m_segment_optimiser; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp new file mode 100644 index 0000000000..0ec2dae717 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp @@ -0,0 +1,79 @@ + +#ifndef _TKET_TokenSwapping_TableLookup_SwapListTableOptimiser_H_ +#define _TKET_TokenSwapping_TableLookup_SwapListTableOptimiser_H_ + +#include + +#include "../SwapListOptimiser.hpp" +#include "PartialMappingLookup.hpp" +#include "SwapListSegmentOptimiser.hpp" +#include "VertexMapResizing.hpp" + +/// TODO: The swap table optimiser currently tries to optimise many segments; +/// solving ~2300 problems with Best TSA takes ~20 seconds, most of which +/// is the table optimisation part. +/// Certainly we can cut down the number of segments optimised; +/// needs experimentation. + +namespace tket { +namespace tsa_internal { + +/** Uses the lookup table to reduce many intervals of a swap sequence. */ +class SwapListTableOptimiser { + public: + /** Reduce the given list of swap in-place, by using the big lookup table. + * Swaps may be significantly reordered, and the final end-to-end + * permutation of vertices may change; only the partial mapping of those + * vertices with tokens is preserved. It's not actually clear what the best + * method is; experimentation is still needed. We can optimise any segment, + * i.e. between any two points. But then, which other segments should we + * choose? Should we overlap trial segments? Should we then combine with the + * simple SwapListOptimiser again? We are lacking a lot of theory to guide us. + * This pass will erase some empty swaps, but doesn't guarantee to find all + * (although in practice, it never does produce empty swaps, if they were + * previously well optimised with a swap list optimiser. Is this "luck", or is + * there a theoretical reason?) + * @param vertices_with_tokens_at_start Before we perform any swaps, which + * vertices have tokens on them? Other vertices are allowed to be moved around + * arbitrarily. + * @param map_resizing An object to take a VertexMapping and enlarge/contract + * it to give the desired number of vertices. So, this object knows about the + * edges in the graph. + * @param swap_list The sequence of swaps to be shortened. + * @param swap_list_optimiser An object to handle non-table optimisations. + * This is used only to do the basic passes needed to make the table effective + * (i.e., clustering interacting swaps together). 
+ */ + void optimise( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser); + + /** For testing, give internal access to the segment optimiser. + * @return a reference to the internal segment optimiser object. + */ + SwapListSegmentOptimiser& get_segment_optimiser(); + + private: + SwapListSegmentOptimiser m_segment_optimiser; + + /** The same interface as "optimise", which goes in both directions, + * and calls this function in a loop, repeatedly reversing and re-reversing + * the swap list to do both directions. A bit crude, but simple and not + * actually too inefficient. + * @param @param vertices_with_tokens_at_start Before we perform any swaps, + * which vertices have tokens on them? + * @param map_resizing An object to take a VertexMapping and enlarge/contract + * it to give the desired number of vertices. + * @param swap_list The sequence of swaps to be shortened. + * @param swap_list_optimiser An object to handle non-table optimisations. + */ + void optimise_in_forward_direction( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp new file mode 100644 index 0000000000..2cbbf21af2 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp @@ -0,0 +1,1408 @@ +#include "SwapSequenceTable.hpp" + +namespace tket { +namespace tsa_internal { + +SwapSequenceTable::Table SwapSequenceTable::get_table() { + Table map; + + // clang-format off + map[2] = { + 0x1, 0x262, 0x373, 0x484, 0x595, 0x27a72, 0x28b82, 0x29c92, + 0x36a63, 0x38d83, 0x39e93, 0x46b64, 0x47d74, 0x49f94, 0x56c65, 0x57e75, + 0x58f85, 0x27dbd72, 0x27ece72, 0x28dad82, 0x28fcf82, 0x29eae92, + 0x29fbf92, 0x36bdb63, 0x36cec63, 0x38bab83, 0x38fef83, 0x39cac93, + 0x39fdf93, 0x46ada64, 0x46cfc64, 0x47aba74, 0x47efe74, 0x49cbc94, + 0x49ede94, 0x56aea65, 0x56bfb65, 0x57aca75, 0x57dfd75, 0x58bcb85, + 0x58ded85, 0x2a8fea2f8, 0x2a9fda2f9, 0x2b7efb2e7, 0x2b9edb2e9, + 0x2c7dfc2d7, 0x2c8dec2d8, 0x3a8fca3f8, 0x3a9fba3f9, 0x3d6cfd3c6, + 0x3d9cbd3c9, 0x3e6bfe3b6, 0x3e8bce3b8, 0x4b7ecb4e7, 0x4b9eab4e9, + 0x4d6ced4c6, 0x4d9cad4c9, 0x4f6aef4a6, 0x4f7acf4a7, 0x5c7dbc5d7, + 0x5c8dac5d8, 0x5e6bde5b6, 0x5e8bae5b8, 0x5f6adf5a6, 0x5f7abf5a7, + }; + + map[3] = { + 0x16, 0x21, 0x62, 0x17a7, 0x18b8, 0x19c9, 0x2373, 0x2484, 0x2595, + 0x36a3, 0x3736, 0x3a31, 0x3a73, 0x46b4, 0x4846, 0x4b41, 0x4b84, 0x56c5, + 0x5956, 0x5c51, 0x5c95, 0x7a27, 0x8b28, 0x9c29, 0x17bd7b, + 0x17ce7c, 0x18ad8a, 0x18cf8c, 0x19ae9a, 0x19bf9b, 0x238d38, 0x239e39, + 0x247d47, 0x249f49, 0x257e57, 0x258f58, 0x36bd3b, 0x36ce3c, 0x3738b8, + 0x3739c9, 0x38d386, 0x38db83, 0x39e396, 0x39ec93, 0x3a3484, 0x3a3595, + 0x3ad8d3, 0x3ae9e3, 0x3b8ba3, 0x3bdb73, 0x3c9ca3, 0x3cec73, 0x3dbd31, + 0x3ece31, 0x46ad4a, 0x46cf4c, 0x47d476, 0x47da74, 0x4847a7, 0x4849c9, + 0x49f496, 0x49fc94, 0x4a7ab4, 0x4ada84, 0x4b4373, 0x4b4595, 0x4bd7d4, + 0x4bf9f4, 0x4c9cb4, 0x4cfc84, 0x4dad41, 0x4fcf41, 0x56ae5a, 0x56bf5b, + 0x57e576, 0x57ea75, 0x58f586, 0x58fb85, 0x5957a7, 0x5958b8, 0x5a7ac5, + 0x5aea95, 0x5b8bc5, 0x5bfb95, 0x5c5373, 0x5c5484, 0x5ce7e5, 0x5cf8f5, + 0x5eae51, 0x5fbf51, 0x7bd7b2, 0x7ce7c2, 0x8ad8a2, 0x8cf8c2, 0x9ae9a2, + 0x9bf9b2, 0x17bfe7fb, 0x17cfd7fc, 0x18aef8ea, 0x18ced8ec, 0x19adf9da, + 0x19bde9db, 0x238fe3f8, 
0x239fd3f9, 0x247ef4e7, 0x249ed4e9, 0x257df5d7, + 0x258de5d8, 0x3738fcf8, 0x3739fbf9, 0x38dfcf83, 0x38fca3f8, 0x38fe3cf8, + 0x39efbf93, 0x39fba3f9, 0x39fd3bf9, 0x3a349f94, 0x3a358f85, 0x3ad9f9d3, + 0x3ae8f8e3, 0x3b8bcec3, 0x3b8fefb3, 0x3bdb3595, 0x3bdbe9e3, 0x3bfe31fb, + 0x3bfefb73, 0x3c9cbdb3, 0x3c9fdfc3, 0x3cec3484, 0x3cecd8d3, 0x3cfd31fc, + 0x3cfdfc73, 0x3d6cfcd3, 0x3d89c9d3, 0x3d9f96d3, 0x3e6bfbe3, 0x3e8f86e3, + 0x3e98b8e3, 0x47dece74, 0x47ecb4e7, 0x47ef4ce7, 0x4847ece7, 0x4849eae9, + 0x49eab4e9, 0x49ed4ae9, 0x49feae94, 0x4a7acfc4, 0x4a7efea4, 0x4ada4595, + 0x4adaf9f4, 0x4aef41ea, 0x4aefea84, 0x4b439e93, 0x4b457e75, 0x4bd9e9d4, + 0x4bf7e7f4, 0x4c9cada4, 0x4c9edec4, 0x4ced41ec, 0x4cedec84, 0x4cfc4373, + 0x4cfcd7d4, 0x4d6cecd4, 0x4d79c9d4, 0x4d9e96d4, 0x4f6aeaf4, 0x4f7e76f4, + 0x4f97a7f4, 0x57dbc5d7, 0x57df5bd7, 0x57edbd75, 0x58dac5d8, 0x58de5ad8, + 0x58fdad85, 0x5957dbd7, 0x5958dad8, 0x5a7abfb5, 0x5a7dfda5, 0x5adf51da, + 0x5adfda95, 0x5aea5484, 0x5aeaf8f5, 0x5b8baea5, 0x5b8dedb5, 0x5bde51db, + 0x5bdedb95, 0x5bfb5373, 0x5bfbe7e5, 0x5c538d83, 0x5c547d74, 0x5ce8d8e5, + 0x5cf7d7f5, 0x5e6bdbe5, 0x5e78b8e5, 0x5e8d86e5, 0x5f6adaf5, 0x5f7d76f5, + 0x5f87a7f5, 0x7dcfc2d7, 0x7ebfb2e7, 0x8dcec2d8, 0x8faea2f8, 0x9ebdb2e9, + 0x9fada2f9, + }; + + map[4] = { + 0x16a, 0x176, 0x1a7, 0x21a, 0x316, 0x321, 0x362, 0x62a, 0x6a3, 0x736, + 0xa31, 0xa73, 0x12712, 0x16bdb, 0x16cec, 0x178b8, 0x179c9, 0x18ad8, + 0x18b8a, 0x18d86, 0x18db8, 0x19ae9, 0x19c9a, 0x19e96, 0x19ec9, 0x1bd7b, + 0x1ce7c, 0x21bdb, 0x21cec, 0x23273, 0x2484a, 0x2595a, 0x26276, 0x318b8, + 0x319c9, 0x32484, 0x32595, 0x346b4, 0x34846, 0x34b41, 0x34b84, 0x356c5, + 0x35956, 0x35c51, 0x35c95, 0x38b28, 0x39c29, 0x46ad4, 0x46ba4, 0x47d46, + 0x4846a, 0x48476, 0x484a7, 0x4a7d4, 0x4ad41, 0x4ad84, 0x4b41a, 0x4ba84, + 0x4d416, 0x4d421, 0x4d462, 0x4d6b4, 0x4d846, 0x4db41, 0x4db84, 0x56ae5, + 0x56ca5, 0x57e56, 0x5956a, 0x59576, 0x595a7, 0x5a7e5, 0x5ae51, 0x5ae95, + 0x5c51a, 0x5ca95, 0x5e516, 0x5e521, 0x5e562, 0x5e6c5, 0x5e956, 0x5ec51, + 0x5ec95, 0x62bdb, 0x62cec, 0x6bdb3, 0x6cec3, 0x738b8, 0x739c9, 0x7a27a, + 0x8a348, 0x8ad38, 0x8b82a, 0x8b8a3, 0x8d386, 0x8d3b8, 0x9a359, 0x9ae39, + 0x9c92a, 0x9c9a3, 0x9e396, 0x9e3c9, 0xbdb31, 0xbdb73, 0xcec31, 0xcec73, + 0x128d812, 0x129e912, 0x16bfefb, 0x16cfdfc, 0x1714b41, 0x1715c51, + 0x178cfc8, 0x179bfb9, 0x18aef8e, 0x18b8cec, 0x18ced8c, 0x18cfca8, + 0x18cfec8, 0x18d8c9c, 0x18dcfc8, 0x18efb8e, 0x18efe86, 0x19adf9d, + 0x19bde9b, 0x19bfba9, 0x19bfdb9, 0x19c9bdb, 0x19dfc9d, 0x19dfd96, + 0x19e9b8b, 0x19ebfb9, 0x1bfe7fb, 0x1cfd7fc, 0x21befbe, 0x21cdfcd, + 0x232d8d3, 0x232e9e3, 0x2484cec, 0x2595bdb, 0x2628d86, 0x2629e96, + 0x28b2db8, 0x29c2ec9, 0x2a49f49, 0x2a58f58, 0x2d4284d, 0x2e5295e, + 0x318fcf8, 0x319fbf9, 0x3249f94, 0x3258f85, 0x346cf4c, 0x34849c9, + 0x349cb49, 0x349f964, 0x349fc94, 0x34b4373, 0x34b4959, 0x34b9f94, + 0x34cf84c, 0x34cfc41, 0x356bf5b, 0x358bc58, 0x358f865, 0x358fb85, + 0x35958b8, 0x35bf95b, 0x35bfb51, 0x35c5373, 0x35c5848, 0x35c8f85, + 0x38fc2f8, 0x39fb2f9, 0x46bcec4, 0x46ced4c, 0x46cf4ec, 0x46cfca4, + 0x4787b84, 0x47d4c9c, 0x47ef4e6, 0x4846cec, 0x48479c9, 0x4849e96, + 0x4849ec9, 0x484ae9e, 0x484c9ca, 0x484cec7, 0x49cad49, 0x49ed496, + 0x49f4976, 0x49f4e96, 0x49f4ec9, 0x49fca49, 0x4a7ef4e, 0x4ab7ab4, + 0x4ad4595, 0x4ad9f49, 0x4ae9ed4, 0x4ae9f4e, 0x4aefe41, 0x4aefe84, + 0x4b41cec, 0x4b4595a, 0x4b67b64, 0x4baf9f4, 0x4bd7bd4, 0x4c9cba4, + 0x4c9ed4c, 0x4ced41c, 0x4ced84c, 0x4cf41ca, 0x4cf41ec, 0x4cf84ca, + 0x4d419c9, 0x4d42595, 0x4d456c5, 0x4d45956, 0x4d45c95, 0x4d4c9c2, + 0x4d849c9, 0x4db4595, 0x4dcfc84, 
0x4df6cf4, 0x4df96f4, 0x4dfc9f4, + 0x4ece7d4, 0x4ef421e, 0x4ef84e6, 0x4efb41e, 0x4efb84e, 0x4efe6b4, + 0x4f6aef4, 0x4f9f46a, 0x4f9f4a7, 0x4fecf84, 0x4fef416, 0x4fef462, + 0x56bde5b, 0x56bf5db, 0x56bfba5, 0x56cbdb5, 0x5797c95, 0x57df5d6, + 0x57e5b8b, 0x58bae58, 0x58de586, 0x58f5876, 0x58f5d86, 0x58f5db8, + 0x58fba58, 0x5956bdb, 0x59578b8, 0x5958d86, 0x5958db8, 0x595ad8d, + 0x595b8ba, 0x595bdb7, 0x5a7df5d, 0x5ac7ac5, 0x5ad8de5, 0x5ad8f5d, + 0x5adfd51, 0x5adfd95, 0x5ae5484, 0x5ae8f58, 0x5b8bca5, 0x5b8de5b, + 0x5bde51b, 0x5bde95b, 0x5bf51ba, 0x5bf51db, 0x5bf95ba, 0x5c51bdb, + 0x5c5484a, 0x5c67c65, 0x5caf8f5, 0x5ce7ce5, 0x5dbd7e5, 0x5df521d, + 0x5df95d6, 0x5dfc51d, 0x5dfc95d, 0x5dfd6c5, 0x5e518b8, 0x5e52484, + 0x5e546b4, 0x5e54846, 0x5e54b84, 0x5e5b8b2, 0x5e958b8, 0x5ebfb95, + 0x5ec5484, 0x5ef6bf5, 0x5ef86f5, 0x5efb8f5, 0x5f6adf5, 0x5f8f56a, + 0x5f8f5a7, 0x5fdbf95, 0x5fdf516, 0x5fdf562, 0x62befeb, 0x62cdfdc, + 0x6befe3b, 0x6cdfd3c, 0x738cf8c, 0x739bf9b, 0x84b8cec, 0x85ecf85, + 0x8aefe38, 0x8b2cec8, 0x8b8c5ec, 0x8b8ece3, 0x8ce348c, 0x8cf8c2a, + 0x8cfe38c, 0x8d389c9, 0x8d3fcf8, 0x8da2da8, 0x8eced38, 0x8ef86e3, + 0x8efb8e3, 0x8fa35f8, 0x8fcf8a3, 0x94dbf94, 0x95c9bdb, 0x9adfd39, + 0x9bd359b, 0x9bf9b2a, 0x9bfd39b, 0x9c2bdb9, 0x9c9b4db, 0x9c9dbd3, + 0x9dbde39, 0x9df96d3, 0x9dfc9d3, 0x9e398b8, 0x9e3fbf9, 0x9ea2ea9, + 0x9fa34f9, 0x9fbf9a3, 0xb5e541b, 0xb5efb51, 0xbefbe73, 0xbfefb31, + 0xc4d451c, 0xc4dfc41, 0xcdfcd73, 0xcfdfc31, 0x128fef812, + 0x129fdf912, 0x1714cfc41, 0x1715bfb51, 0x18d8c515c, 0x19e9b414b, + 0x2328fe3f8, 0x2329fd3f9, 0x242d427d4, 0x252e527e5, 0x2628fef86, + 0x2629fdf96, 0x28b2878b8, 0x28bfef2b8, 0x28fc2ecf8, 0x29c2979c9, + 0x29cfdf2c9, 0x29fb2dbf9, 0x2d4f9f24d, 0x2df5295fd, 0x2e5f8f25e, + 0x2ef4284fe, 0x34b49e3e9, 0x34cfc4373, 0x35bfb5373, 0x35c58d3d8, + 0x428427842, 0x46b469e96, 0x4787fcf84, 0x4849e98b8, 0x49f4befb9, + 0x4a7acfca4, 0x4abe9eab4, 0x4cb9cb4ec, 0x4cef7efc4, 0x4cf97f9c4, + 0x4cfdfc7d4, 0x4d45c5848, 0x4fc67c6f4, 0x529527952, 0x56c568d86, + 0x5797fbf95, 0x58f5cdfc8, 0x5958d89c9, 0x5a7abfba5, 0x5acd8dac5, + 0x5bc8bc5db, 0x5bdf7dfb5, 0x5bf87f8b5, 0x5bfefb7e5, 0x5e54b4959, + 0x5fb67b6f5, 0x7bd7b2bdb, 0x7ce7c2cec, 0x8edce5de8, 0x8fea2eaf8, + 0x9debd4ed9, 0x9fda2daf9, 0x242d42e9ed4, 0x252e52d8de5, + 0x2b28b29e98b, 0x2c29c28d89c, 0x428429e9842, 0x47ecb47e7ce, + 0x487845c5484, 0x49cb479c797, 0x4d45c54d7d4, 0x4fb9f479fbf, + 0x529528d8952, 0x57dbc57d7bd, 0x58bc578b787, 0x597954b4595, + 0x5e54b45e7e5, 0x5fc8f578fcf, 0x8ced82ce2c2, 0x8fdcf82cfdf, + 0x9bde92bd2b2, 0x9febf92bfef, 0xbf4efb7ef4f, 0xcf5dfc7df5f, + 0xdf85fd25f8f, 0xef94fe24f9f, + }; + + map[5] = { + 0x16ad, 0x16ba, 0x16db, 0x176d, 0x186a, 0x1876, 0x18a7, 0x1ad8, 0x1b8a, + 0x1d86, 0x1db8, 0x21ad, 0x21ba, 0x21db, 0x321d, 0x3d16, 0x3d62, 0x416a, + 0x4176, 0x41a7, 0x421a, 0x4316, 0x4321, 0x4362, 0x46a3, 0x4736, 0x4a31, + 0x4a73, 0x62ad, 0x62ba, 0x62db, 0x642a, 0x6a3d, 0x6ad4, 0x6b4a, 0x6d42, + 0x73d6, 0x76d4, 0x846a, 0x8476, 0x84a7, 0xa17d, 0xa31d, 0xa73d, 0xa7d4, + 0xad41, 0xad84, 0xb84a, 0xba41, 0xd416, 0xd421, 0xd6b4, 0xd846, 0xdb41, + 0xdb84, 0x12712d, 0x12812a, 0x12d812, 0x132813, 0x138136, 0x167b67, + 0x16aefe, 0x16bcec, 0x16cdfc, 0x16cecd, 0x16cfca, 0x16cfec, 0x16efbe, + 0x176efe, 0x1787b8, 0x179c9d, 0x17a7ba, 0x186cec, 0x187121, 0x187c9c, + 0x189ae9, 0x189c9a, 0x189ec9, 0x18a313, 0x18ce7c, 0x18e9e6, 0x19adf9, + 0x19ae9d, 0x19bf9a, 0x19c9ad, 0x19c9db, 0x19dbf9, 0x19df96, 0x19e96d, + 0x19f96a, 0x19f9a7, 0x19fae9, 0x19fca9, 0x19fe96, 0x19fec9, 0x1a7efe, + 0x1aef8e, 0x1b8cec, 0x1bd7bd, 0x1c9cba, 0x1c9dfc, 0x1c9ecd, 0x1cdf8c, 
+ 0x1ce7dc, 0x1ced8c, 0x1cf8ec, 0x1d89c9, 0x1ef86e, 0x1efb8e, 0x1f976f, + 0x1fcf8a, 0x213b23, 0x21aefe, 0x21bcec, 0x21cdfc, 0x21cecd, 0x21cfca, + 0x21ecfe, 0x232473, 0x23273d, 0x237b23, 0x242a84, 0x243284, 0x2595ad, + 0x2595ba, 0x2595db, 0x26276d, 0x26286a, 0x262876, 0x262d86, 0x2d4284, + 0x316efe, 0x321efe, 0x327387, 0x32959d, 0x3436b4, 0x343846, 0x343b41, + 0x343b84, 0x356c5d, 0x35956d, 0x35c95d, 0x36a3ba, 0x373876, 0x373a87, + 0x3a31ba, 0x3a73ba, 0x3c51dc, 0x3d19c9, 0x3efe62, 0x412712, 0x416cec, + 0x4179c9, 0x419ae9, 0x419c9a, 0x419e96, 0x419ec9, 0x41ce7c, 0x421cec, + 0x42595a, 0x426276, 0x4319c9, 0x432959, 0x4356c5, 0x435956, 0x435c95, + 0x439c29, 0x43c5c1, 0x456ae5, 0x456c5a, 0x457e56, 0x45956a, 0x4595a7, + 0x45a7e5, 0x45ae51, 0x45c51a, 0x45e516, 0x45e562, 0x45e6c5, 0x45e965, + 0x45ec51, 0x45ec95, 0x462cec, 0x46ce3c, 0x4739c9, 0x47a27a, 0x48a348, + 0x495976, 0x495ae9, 0x495c9a, 0x49ae39, 0x49c2a9, 0x49ca39, 0x49e3c9, + 0x4a3595, 0x4ce31c, 0x4ce73c, 0x4e521e, 0x4e9e36, 0x56adf5, 0x56ae5d, + 0x56bf5a, 0x56c5ad, 0x56c5ba, 0x56c5db, 0x57df56, 0x57e56d, 0x58f56a, + 0x58f576, 0x58f5a7, 0x5956ad, 0x5956ba, 0x59576d, 0x59586a, 0x595876, + 0x5958a7, 0x595a7d, 0x595ad8, 0x595b8a, 0x595d6b, 0x595d86, 0x595db8, + 0x5a7df5, 0x5a7e5d, 0x5ad8f5, 0x5adf51, 0x5adf95, 0x5ae51d, 0x5ae95d, + 0x5b8f5a, 0x5bf51a, 0x5bf95a, 0x5c51ad, 0x5c51ba, 0x5c51db, 0x5c95ad, + 0x5c95ba, 0x5c95db, 0x5d6bf5, 0x5d8f56, 0x5db8f5, 0x5dbf95, 0x5df516, + 0x5df521, 0x5df562, 0x5df6c5, 0x5df956, 0x5dfc51, 0x5dfc95, 0x5e516d, + 0x5e521d, 0x5e956d, 0x5ed562, 0x5ed6c5, 0x5edc51, 0x5edc95, 0x5f516a, + 0x5f5176, 0x5f51a7, 0x5f521a, 0x5f5316, 0x5f5362, 0x5f56a3, 0x5f5736, + 0x5f6ae5, 0x5f6c5a, 0x5f7e56, 0x5f956a, 0x5f9576, 0x5fa7e5, 0x5fae95, + 0x5fca95, 0x5fe516, 0x5fe521, 0x5fe562, 0x5fe6c5, 0x5fe956, 0x5fec51, + 0x5fec95, 0x623b23, 0x62aefe, 0x62b676, 0x62bece, 0x62cdfc, 0x62cecd, + 0x62cfec, 0x62efbe, 0x62fcfa, 0x65f52a, 0x67b674, 0x6a3efe, 0x6aef4e, + 0x6b4cec, 0x6bd3bd, 0x6cd45c, 0x6cdf4c, 0x6cecd4, 0x6cf4ca, 0x6ece3d, + 0x6ecf4e, 0x6ef42e, 0x6efb4e, 0x7387b8, 0x73d9c9, 0x73fef6, 0x76efe4, + 0x787b84, 0x79c9d4, 0x7a2b7a, 0x7a72ad, 0x7a7ba4, 0x846cec, 0x8479c9, + 0x849ae9, 0x849c9a, 0x849e96, 0x84c9ec, 0x87b82b, 0x8ad83d, 0x8b28ba, + 0x8b28db, 0x8d863d, 0x8db83d, 0x95f9a7, 0x9a359d, 0x9ad459, 0x9adf49, + 0x9ae3d9, 0x9ae9d4, 0x9b459a, 0x9bf49a, 0x9c2ad9, 0x9c2ba9, 0x9c92db, + 0x9c932d, 0x9c9a3d, 0x9c9ad4, 0x9c9ba4, 0x9c9d42, 0x9c9db4, 0x9d4259, + 0x9d4596, 0x9d45c9, 0x9db459, 0x9dbf49, 0x9df496, 0x9dfc94, 0x9e963d, + 0x9e96d4, 0x9ec93d, 0x9ec9d4, 0x9f496a, 0x9f4976, 0x9f49a7, 0x9f4ae9, + 0x9f4e96, 0x9f4ec9, 0x9fc9a4, 0xa31efe, 0xa5f531, 0xa73fef, 0xa75f53, + 0xa7efe4, 0xaef41e, 0xafe51f, 0xafef84, 0xb238b2, 0xb41cec, 0xb84cec, + 0xbd73bd, 0xbdb3d1, 0xbdb7d4, 0xc51fca, 0xcd419c, 0xcd451c, 0xcd89c4, + 0xcdf41c, 0xcdf84c, 0xce73dc, 0xce7d4c, 0xcec3d1, 0xcec874, 0xcecd41, + 0xcecd84, 0xcfca41, 0xcfca84, 0xcfec41, 0xcfec84, 0xdbf5f1, 0xefb41e, + 0xefb84e, 0xefe2b1, 0xefe416, 0xefe421, 0xefe846, 0xf5321f, + 0x12712efe, 0x12812cec, 0x129df912, 0x129e912d, 0x129f912a, 0x12fe912f, + 0x12fef812, 0x1329f913, 0x1361b613, 0x13813c9c, 0x139f9136, 0x16b6e96e, + 0x16cfc676, 0x17a7cfca, 0x1813c5c1, 0x1815ae51, 0x1815c51a, 0x1815e516, + 0x181c5ec1, 0x181e5e21, 0x181ece31, 0x18715c51, 0x189e9b8b, 0x18b318b3, + 0x18e9e121, 0x197f97c9, 0x19bde9bd, 0x19eabea9, 0x19f9a313, 0x19fbefb9, + 0x1c9cbece, 0x1cdfd7dc, 0x1ce7cfef, 0x1d815c51, 0x1f97121f, 0x213cfc23, + 0x217b2172, 0x21b2e52e, 0x2324e9e3, 0x2325f573, 0x2328d38d, 0x232e9e3d, + 0x237cfc23, 
0x242af9f4, 0x242cec84, 0x2432f9f4, 0x245e5284, 0x24d427d4, + 0x25efb7e5, 0x26276efe, 0x26286232, 0x2629df96, 0x2629e96d, 0x262e96fe, + 0x262fef86, 0x26826ece, 0x26f9726f, 0x27842784, 0x27a27a87, 0x2953b239, + 0x29c2fec9, 0x29ced2c9, 0x2b25e562, 0x2b32e9e3, 0x2bc9579c, 0x2be5295e, + 0x2d42f9f4, 0x2e52495e, 0x2e5295ed, 0x2e5295fe, 0x2ef4284e, 0x2f52a95f, + 0x2f53295f, 0x2f9579fb, 0x2fc2632f, 0x2fcf6276, 0x32373fef, 0x325fe8f5, + 0x32739f97, 0x329e3fe9, 0x3436cf4c, 0x343849c9, 0x3439f4c9, 0x343b4373, + 0x343c9cb4, 0x343f9f46, 0x34b34959, 0x34cf834c, 0x35c57387, 0x35c5d737, + 0x36a3686a, 0x36a3cfca, 0x3739f976, 0x373a9f97, 0x373b67b6, 0x373ce87c, + 0x38ba38ba, 0x39ca3ba9, 0x39e3afe9, 0x39e3fe96, 0x39e3fec9, 0x3a31cfca, + 0x3a3595ba, 0x3a73cfca, 0x3abe9e3a, 0x3c53473c, 0x3c5c4384, 0x3ce31fec, + 0x3ce73fec, 0x3cfe8fc2, 0x3e9c289e, 0x3f53c95f, 0x3f59635f, 0x4171b41b, + 0x4171c51c, 0x419e9121, 0x42629e96, 0x434b9f94, 0x434cfc41, 0x457ac57a, + 0x45ae5484, 0x45c5484a, 0x45c67c65, 0x45e54864, 0x45e54b84, 0x45e56b4b, + 0x45e7ce75, 0x48ec548e, 0x48ece348, 0x49597c9c, 0x49aea2a9, 0x49c29ece, + 0x4a34f9f4, 0x4b5e541b, 0x53f53c51, 0x56c5686a, 0x56c56876, 0x56c5b676, + 0x56c5bcec, 0x578795b8, 0x579c795d, 0x579c8795, 0x57ac57ba, 0x57e57876, + 0x57e578a7, 0x57e587b8, 0x57eab7a5, 0x5878b8f5, 0x5898ae95, 0x5898c95a, + 0x5898ec95, 0x58bc578b, 0x59567b67, 0x5957a7ba, 0x595cd89c, 0x595d7bd7, + 0x5989e956, 0x598c9532, 0x5a7abf5a, 0x5a7acad5, 0x5ade8de5, 0x5ae51bab, + 0x5ae95aba, 0x5aefe8f5, 0x5b6bf576, 0x5b8bc5db, 0x5b8bcba5, 0x5bce8bc5, + 0x5bcebc51, 0x5bcebc95, 0x5bdb7df5, 0x5bde95bd, 0x5c67c6d5, 0x5c6c5d86, + 0x5cb79c53, 0x5cbec7e5, 0x5cfca8f5, 0x5d8ded56, 0x5db8de5d, 0x5dbd7e5d, + 0x5de6bde5, 0x5debde51, 0x5df52959, 0x5dfcf8f5, 0x5e78ce75, 0x5ed7ce75, + 0x5f526276, 0x5f571721, 0x5f57a27a, 0x5f797c95, 0x5fa35935, 0x5fac7ac5, + 0x5fe6bf5b, 0x5feb8f5b, 0x5febfb95, 0x5fec7e57, 0x5fec8f58, 0x6268e9e6, + 0x6269f96a, 0x62b69e96, 0x63f5c35f, 0x65fc5676, 0x67cfc674, 0x6a5e5aba, + 0x6cfd3fdc, 0x6ece3fef, 0x717d5c51, 0x73787c9c, 0x7387fcf8, 0x73ec9bce, + 0x73f97c9f, 0x7871fcf8, 0x787fcf84, 0x79f53bf9, 0x7a27afef, 0x7a7cfca4, + 0x7bd7bd2b, 0x7cef47ec, 0x7f97c9f4, 0x849e98b8, 0x8795f259, 0x87cf85fc, + 0x89e9b82b, 0x8ad8a2ad, 0x8b28efbe, 0x8ced38dc, 0x8d3fcf8d, 0x8d89c93d, + 0x8e5f25e7, 0x8ef865fe, 0x8fce72cf, 0x92c2dfc9, 0x9ae9a2ad, 0x9ae9a2ba, + 0x9b2bc932, 0x9bce2bc9, 0x9bcebc94, 0x9bde9b3d, 0x9bf92b32, 0x9bf9b2ba, + 0x9bf9b2db, 0x9bfd3bd9, 0x9c2cfc9a, 0x9c2ec987, 0x9c9bdb3d, 0x9d3bd359, + 0x9dbded49, 0x9df963d3, 0x9dfc9d3d, 0x9e3bce98, 0x9e3febf9, 0x9eabea49, + 0x9f4befb9, 0x9fb2efb9, 0xa72cfca2, 0xad9f9d3d, 0xb6b49e96, 0xb82b5e52, + 0xb8b2bece, 0xb8fec3ef, 0xbc59835c, 0xbef51bfe, 0xc537f53c, 0xcdfcd73d, + 0xcdfcd7d4, 0xce7fec2b, 0xcf538f5c, 0xcfd3fd1c, 0xd45c5484, 0xe529589e, + 0xe98b598e, 0xef5bfe73, 0xf5395fb8, 0xf97c92cf, 0xfc239c2f, + 0x131813fcf8, 0x1361cfc613, 0x16b615e516, 0x17ce7bcebc, 0x1815e518b8, + 0x197c9bc979, 0x1bfe7bfebf, 0x2171cfc217, 0x2329fd3fd9, 0x24d42e9ed4, + 0x252f52a8f5, 0x252f5328f5, 0x254e5247e5, 0x25e7b25e25, 0x26239f9623, + 0x28429e9842, 0x2a27a29f97, 0x2b2129e912, 0x2c29c279cd, 0x2c29c2d89c, + 0x2e5f825f5e, 0x2ec982c9e2, 0x2f5e75e25f, 0x2f952795f2, 0x32e39e389e, + 0x3437fcf437, 0x353f536bf5, 0x353f538f56, 0x35c5d8d3d8, 0x37367cfc67, + 0x395fb35f93, 0x39ecb3ece9, 0x39fbafb3a9, 0x3a6a39f96a, 0x3ce98e93ec, + 0x3cfe38fec3, 0x3e3ce31bce, 0x3e3ce73bce, 0x3ea9e3a89e, 0x41714fcf41, + 0x419e914b41, 0x429c279c79, 0x434b439e39, 0x4529579525, 0x47ec27ec7e, + 0x513c51bc53, 0x529527952d, 0x529528952a, 
0x52952d8952, 0x5295895325, + 0x52e527e587, 0x535f53bf51, 0x53bc538bc5, 0x53c537bc53, 0x579f97bf95, + 0x598a359353, 0x5ac7ac5878, 0x5ac8dac58d, 0x5bcb9c79c5, 0x5bfbefb7e5, + 0x5c6c586cec, 0x5dbc7dbc57, 0x5e6ae5868a, 0x5e7e5b6b76, 0x5e8bae58ba, + 0x5f517c515c, 0x626862e52e, 0x6befb3ef3e, 0x6cbec3ecbc, 0x7397f97bf9, + 0x7845c57848, 0x791f971bf9, 0x7975987259, 0x7e7ce7bce4, 0x7fec27ecf7, + 0x87f8cf82cf, 0x8c953895c8, 0x8efc2fc8fe, 0x8f8ef863ef, 0x8f8efb83ef, + 0x8fdcf8d2cf, 0x953c5bc593, 0x9596be969e, 0x968e963e98, 0x97c92c9879, + 0x97c97bc974, 0x97f97bf92b, 0x9895395896, 0x98e98b3e98, 0x9debde92b2, + 0x9eae92aefe, 0x9fadf92a2d, 0xa353f538f5, 0xae3fe8fe3e, 0xbc65cb635c, + 0xbcb9c2b79c, 0xbfbefb3ef1, 0xc2bec27ec2, 0xc2c98c92ca, 0xc2fc238fc2, + 0xc2fca8fc2c, 0xc5d45c7d74, 0xc9ca3fcacf, 0xcfdfc7df5f, 0xdf85f25f8f, + 0xe3febfe73e, 0xe52d7e5de7, 0xe545eb4595, 0xe7ec2ec7ed, 0xece5de8de5, + 0xf4b9f479fb, 0xf53b8f5bfb, 0xf919f319c9, 0xfbfe7febf4, 0xfef429f4f9, + 0x13191f913bf9, 0x1391c91bc913, 0x232e3fe38fe3, 0x24f4ef427ef4, + 0x2528f5278f52, 0x252e52de8de5, 0x252fdf527df5, 0x2a2ea9ea289e, + 0x2b2529527952, 0x2c29c2389c23, 0x2c2ec27ec287, 0x2f49f4279f42, + 0x3539538953b8, 0x353f537bf537, 0x37397c97bc97, 0x39ca3989ca98, + 0x3cafca38fca3, 0x3e39e369b69e, 0x3e3bce38bce3, 0x3e3c6ce386ce, + 0x4597954b4595, 0x4b454e54b7e5, 0x5395369b6953, 0x53c6c5386c53, + 0x5c51715bc517, 0x5f51715fbf51, 0x7d7fdcfd72cf, 0x7e7fe7bfe72b, + 0x8ced8c2cedc2, 0x8f8eaef82aef, + }; + + map[6] = { + 0x16adf, 0x16aed, 0x16afe, 0x16baf, 0x16cad, 0x16cba, 0x16cdb, 0x16dfc, + 0x16fec, 0x176df, 0x176ed, 0x176fe, 0x186af, 0x18f76, 0x18fa7, 0x196ad, + 0x196ba, 0x196db, 0x197d6, 0x1986a, 0x19876, 0x198a7, 0x19ad8, 0x19b8a, + 0x19d86, 0x19db8, 0x1a7ed, 0x1a7fe, 0x1ad8f, 0x1adf9, 0x1ae9d, 0x1afe9, + 0x1b8fa, 0x1bf9a, 0x1c9ad, 0x1c9ba, 0x1c9db, 0x1d86f, 0x1db8f, 0x1dbf9, + 0x1df96, 0x1dfc9, 0x1e96d, 0x1ec9d, 0x1f96a, 0x1f976, 0x1f9a7, 0x1fc9a, + 0x1fe96, 0x1fec9, 0x21adf, 0x21aed, 0x21afe, 0x21baf, 0x21cad, 0x21cba, + 0x21cdb, 0x21dbf, 0x21dfc, 0x21ecd, 0x21fca, 0x21fec, 0x316df, 0x316fe, + 0x321df, 0x321ed, 0x321fe, 0x3df62, 0x3ed16, 0x3ed62, 0x41a7f, 0x421af, + 0x4362f, 0x4f16a, 0x4f176, 0x4f316, 0x4f321, 0x4f62a, 0x4f6a3, 0x4f736, + 0x4fa31, 0x4fa73, 0x516ad, 0x516ba, 0x516db, 0x5176d, 0x5186a, 0x51876, + 0x518a7, 0x51ad8, 0x51db8, 0x521ad, 0x521ba, 0x521db, 0x5316d, 0x53d21, + 0x53d62, 0x5416a, 0x54176, 0x541a7, 0x542a1, 0x54316, 0x54321, 0x54362, + 0x546a3, 0x54736, 0x54a31, 0x54a73, 0x562ba, 0x562db, 0x56a3d, 0x56ad4, + 0x56b4a, 0x56db4, 0x573d6, 0x57d46, 0x5846a, 0x58476, 0x584a7, 0x5a31d, + 0x5a73d, 0x5a7d4, 0x5ad41, 0x5ad84, 0x5b41a, 0x5b84a, 0x5d416, 0x5d421, + 0x5d462, 0x5d846, 0x5db41, 0x5db84, 0x62adf, 0x62aed, 0x62afe, 0x62bfa, + 0x62cad, 0x62cba, 0x62cdb, 0x62dfc, 0x62ecd, 0x62fca, 0x62fec, 0x632fe, + 0x652ad, 0x6542a, 0x6a3df, 0x6a3ed, 0x6a3fe, 0x6ad4f, 0x6adf5, 0x6ae5d, + 0x6af53, 0x6b4fa, 0x6baf5, 0x6c5ad, 0x6c5ba, 0x6c5db, 0x6d42f, 0x6db4f, + 0x6df52, 0x6dfc5, 0x6e52d, 0x6ec5d, 0x6f52a, 0x6f532, 0x6fca5, 0x6fe52, + 0x6fec5, 0x73fe6, 0x763df, 0x763ed, 0x76d4f, 0x76df5, 0x76ed5, 0x76f53, + 0x76fe5, 0x84f6a, 0x84f76, 0x84fa7, 0x86af5, 0x876f5, 0x8f5a7, 0x956ad, + 0x956ba, 0x956db, 0x9576d, 0x9586a, 0x95876, 0x958a7, 0x95a7d, 0x95ad8, + 0x95b8a, 0x95d86, 0x95db8, 0xa17df, 0xa197d, 0xa31df, 0xa31ed, 0xa31fe, + 0xa517d, 0xa73df, 0xa73ed, 0xa73fe, 0xa7d4f, 0xa7df5, 0xa7e5d, 0xa7f53, + 0xad41f, 0xad84f, 0xad8f5, 0xadf51, 0xadf95, 0xae51d, 0xae95d, 0xaf517, + 0xaf531, 0xaf975, 0xafe95, 0xb4f1a, 0xba518, 0xba84f, 0xba8f5, 0xbaf51, + 
0xbaf95, 0xc51ad, 0xc51db, 0xc95ad, 0xc95ba, 0xc95db, 0xcba51, 0xd16bf, + 0xd416f, 0xd421f, 0xd5186, 0xd62bf, 0xd6bf5, 0xd864f, 0xd86f5, 0xdb41f, + 0xdb84f, 0xdb8f5, 0xdbf51, 0xdbf95, 0xdf516, 0xdf521, 0xdf956, 0xdfc51, + 0xdfc95, 0xe956d, 0xed16c, 0xed516, 0xed521, 0xedc51, 0xedc95, 0xf16ca, + 0xf516a, 0xf5176, 0xf521a, 0xf5316, 0xf5321, 0xf6ae5, 0xf956a, 0xf9576, + 0xfa7e5, 0xfae51, 0xfc51a, 0xfca95, 0xfe516, 0xfe521, 0xfe965, 0xfec51, + 0xfec95, 0x12712df, 0x12712ed, 0x12712fe, 0x12812af, 0x128132f, + 0x128712f, 0x12912ad, 0x12912ba, 0x129132d, 0x129712d, 0x12d812f, + 0x12d9812, 0x12df912, 0x12e912d, 0x12f912a, 0x12f9712, 0x12fe912, + 0x1329813, 0x132f913, 0x1343fc9, 0x138136f, 0x139163d, 0x1391643, + 0x1396b23, 0x1398136, 0x13f9136, 0x142a914, 0x146ca34, 0x149146a, + 0x1491476, 0x167b67f, 0x167c67d, 0x167fc67, 0x168c68a, 0x16abeab, + 0x16bdbed, 0x16cb676, 0x16cd868, 0x16efbef, 0x178be78, 0x1797dc9, + 0x17a7baf, 0x17a7cad, 0x17a7fca, 0x17e7876, 0x18789b8, 0x187b8cb, + 0x18d86ed, 0x18f78b8, 0x18fa313, 0x19121db, 0x1914321, 0x191ad41, + 0x191b41a, 0x191d416, 0x191db41, 0x197a7ba, 0x198121a, 0x1987121, + 0x19879c9, 0x1989ca9, 0x1989e96, 0x1989ec9, 0x198a313, 0x19a3143, + 0x19b67b6, 0x19bd7bd, 0x19cd89c, 0x19d4214, 0x1a7acba, 0x1a7e787, + 0x1a7eaba, 0x1ad8ede, 0x1aef8ef, 0x1b8acbc, 0x1b8bcdb, 0x1b8cbec, + 0x1b8dbed, 0x1bcb6ec, 0x1bcbec9, 0x1bd7bdf, 0x1bd7ebd, 0x1bde9bd, + 0x1c687c6, 0x1cdf8cf, 0x1ce7bce, 0x1ce7cfe, 0x1e78ce7, 0x1e7ce7d, + 0x1eae9ba, 0x1ecf8ef, 0x1ef86ef, 0x1efb8ef, 0x1efebf9, 0x1f8cf8a, + 0x1f91a31, 0x213b23f, 0x213cb23, 0x213fc23, 0x214c24a, 0x21abeab, + 0x21bdbed, 0x21c232d, 0x21cebce, 0x21efbef, 0x2324f73, 0x2325473, + 0x23273ed, 0x232d8f5, 0x23723fe, 0x2372f53, 0x237c23d, 0x237c243, + 0x237cb23, 0x237fc23, 0x23c2187, 0x23d8fc2, 0x2432584, 0x243c284, + 0x24854ec, 0x248c24a, 0x252ad95, 0x252ba95, 0x253d295, 0x2542a95, + 0x2543295, 0x25925db, 0x259d425, 0x26276df, 0x26276ed, 0x26276fe, + 0x26286af, 0x262876f, 0x26296ad, 0x262976d, 0x2629876, 0x2629d86, + 0x262d86f, 0x262df96, 0x262e96d, 0x262f96a, 0x262f976, 0x262fe96, + 0x26926db, 0x28478fc, 0x2ae8748, 0x2bf3273, 0x2c14324, 0x2c23d62, + 0x2c24176, 0x2c24362, 0x2c26d42, 0x2c2d421, 0x2c62d86, 0x2c68132, + 0x2c87124, 0x2ca7387, 0x2ca8781, 0x2cd4284, 0x2d42584, 0x2d4284f, + 0x2df5295, 0x2e5295d, 0x2ec8478, 0x2f52a95, 0x2f53295, 0x2fc2362, + 0x2fc6276, 0x2fe5295, 0x3164e34, 0x3189eb8, 0x323573d, 0x3248e34, + 0x324f9e3, 0x327387f, 0x327397d, 0x3273987, 0x3273f97, 0x329589e, + 0x329e3d8, 0x32d373f, 0x32e3431, 0x32e3473, 0x32e7387, 0x34356b4, + 0x3435846, 0x3435b84, 0x3436b4f, 0x34384f6, 0x343b84f, 0x343bf95, + 0x347396b, 0x3486e34, 0x34b8e34, 0x34be341, 0x34bf9e3, 0x34bfe73, + 0x353d6c5, 0x353d956, 0x353dc51, 0x353dc95, 0x35436c5, 0x3543956, + 0x3543c95, 0x36a3baf, 0x36a3cad, 0x36a3cba, 0x36a3fca, 0x373876f, + 0x37396ba, 0x3739876, 0x373a87f, 0x373a987, 0x373af97, 0x373f976, + 0x3743bf9, 0x3796243, 0x3796b23, 0x389eb28, 0x3986235, 0x39e6b23, + 0x3a31baf, 0x3a31cad, 0x3a31cba, 0x3a31fca, 0x3a73baf, 0x3a73cba, + 0x3a73fca, 0x3b2f9e3, 0x3b8fe3a, 0x3d8b298, 0x3e3186a, 0x3e346a3, + 0x3e34a31, 0x3e436b4, 0x3e6ab3a, 0x3e76b23, 0x3ea31ba, 0x3eb2318, + 0x3eba8b2, 0x3f536c5, 0x3f53956, 0x3f53c51, 0x3f53c95, 0x417c9bc, + 0x42a8498, 0x42af484, 0x43198c9, 0x432484f, 0x4328498, 0x434b41f, + 0x435cf84, 0x436b4cb, 0x4384986, 0x4384b98, 0x4384fc9, 0x43b41cb, + 0x43b84cb, 0x43c9f4b, 0x4546ae5, 0x4546c5a, 0x4546ec5, 0x4547e56, + 0x454956a, 0x4549576, 0x45495a7, 0x454a7e5, 0x454ae51, 0x454ae95, + 0x454c51a, 0x454c95a, 0x454e516, 0x454e956, 
0x454ec95, 0x45e4562, + 0x46ad4ed, 0x46b4cba, 0x46b4dcb, 0x473cb9c, 0x47d4ed6, 0x4846cad, + 0x4849876, 0x484ba98, 0x484db98, 0x486ca34, 0x48948a7, 0x4894ad8, + 0x489a348, 0x4a34584, 0x4a3484f, 0x4a35495, 0x4a7d4ed, 0x4ad41ed, + 0x4ad84ed, 0x4b84dcb, 0x4bcdb73, 0x4cb9ca3, 0x4d416ed, 0x4d421ed, + 0x4d864ed, 0x4db41ed, 0x4db84ed, 0x4de4d62, 0x4de6b4d, 0x4f12712, + 0x4f26276, 0x4f7a27a, 0x512712d, 0x512812a, 0x512d812, 0x5138136, + 0x5167b67, 0x517a7ba, 0x5186cec, 0x5187121, 0x518a313, 0x51bd7bd, + 0x5237b23, 0x5242a84, 0x526276d, 0x526286a, 0x5262d86, 0x52b2321, + 0x5327387, 0x53a31ba, 0x54179c9, 0x5426276, 0x5434b41, 0x543c5c1, + 0x545ec51, 0x5471271, 0x547a27a, 0x54e5e21, 0x5626876, 0x562b232, + 0x562b676, 0x56a3bab, 0x56bd3bd, 0x5736878, 0x57378a7, 0x57387b8, + 0x573a7ba, 0x57871b8, 0x5787b84, 0x57a27ad, 0x57a2b7a, 0x57ab47a, + 0x5813281, 0x58ad38d, 0x58b278b, 0x58b28db, 0x59a359d, 0x59ad459, + 0x59b459a, 0x59d4596, 0x59db459, 0x5b238b2, 0x5b28b2a, 0x5b6b476, + 0x5b8d3bd, 0x5bd31bd, 0x5bd73bd, 0x5bdb7d4, 0x5c9d45c, 0x5cd45c1, + 0x5cecd84, 0x5d38d36, 0x5d456c5, 0x623b23f, 0x624c24a, 0x626986a, + 0x62969ba, 0x62ae787, 0x62bdbed, 0x62c676d, 0x62c6876, 0x62c868a, + 0x62cb232, 0x62cb676, 0x62cbcec, 0x62eabea, 0x62efbef, 0x6324e34, + 0x672b67f, 0x67b674f, 0x67b67f5, 0x67c67d5, 0x67c6875, 0x67cb675, + 0x67f5267, 0x67fc675, 0x68c68a5, 0x69a3d89, 0x6abeab5, 0x6aef4ef, + 0x6b4cbec, 0x6bdb3df, 0x6bdbed5, 0x6c5bcec, 0x6c5d868, 0x6cbec3e, + 0x6cf4cfa, 0x6db3ed3, 0x6dfc4f4, 0x6ec3e43, 0x6ecd3e3, 0x6ece3fe, + 0x6ecf4ef, 0x6ef42ef, 0x6efb4ef, 0x6efbef5, 0x73697d9, 0x73876cb, + 0x73879b8, 0x7387b8f, 0x7397dc9, 0x73987c9, 0x73ac687, 0x73cb679, + 0x73db8cb, 0x73dcbc6, 0x73e6878, 0x73e87b8, 0x73f97c9, 0x7634e34, + 0x768e785, 0x76efe4f, 0x78795b8, 0x787b84f, 0x787fc51, 0x78fc537, + 0x79c1943, 0x7a27adf, 0x7a27afe, 0x7a2cb7a, 0x7a2fc7a, 0x7a72baf, + 0x7a7ba4f, 0x7a7baf5, 0x7a7cad5, 0x7a7cba5, 0x7a7f52a, 0x7a7fca5, + 0x7c9bc53, 0x7d4cb9c, 0x7e587b8, 0x7f51721, 0x7f971c9, 0x7f97c95, + 0x846898a, 0x8478b98, 0x84798c9, 0x8489d86, 0x8498ae9, 0x8498c9a, + 0x8498e96, 0x8498ec9, 0x84bae78, 0x84bc78b, 0x84e786c, 0x84edce7, + 0x86c24ec, 0x8795248, 0x87b82bf, 0x87b82cb, 0x87b8cb5, 0x87f8b85, + 0x8981ae9, 0x898ae95, 0x898c95a, 0x898e521, 0x898ec95, 0x89b82ae, + 0x89e5248, 0x89e54b8, 0x8ad3e8d, 0x8ad83df, 0x8ad8ed5, 0x8b2a798, + 0x8b2ae78, 0x8b2c8ba, 0x8b82aed, 0x8b8cba5, 0x8b8d2cb, 0x8b8dcb5, + 0x8b98e52, 0x8d863df, 0x8d863ed, 0x8d86ed5, 0x8db83ed, 0x8db8ed5, + 0x8e75248, 0x8fce72c, 0x91491a7, 0x919a31d, 0x9567b67, 0x957a7ba, + 0x95879c9, 0x95bd7bd, 0x95c79cd, 0x95cd89c, 0x95e89e6, 0x97dc92c, + 0x98135c9, 0x9862c32, 0x987c92c, 0x98ae93e, 0x98c92ca, 0x98c9532, + 0x98c9e2c, 0x98e963e, 0x98ec93e, 0x9adf94f, 0x9ae39ed, 0x9ae9f3e, + 0x9bf94fa, 0x9c29cba, 0x9c29cdb, 0x9c29dfc, 0x9c29ecd, 0x9c29fec, + 0x9c2d89c, 0x9c9a3df, 0x9ca3d89, 0x9dbf49f, 0x9df964f, 0x9dfc94f, + 0x9e3c9fe, 0x9e3feb8, 0x9f49f6a, 0x9f49fa7, 0x9f4ae9f, 0x9f4c9fa, + 0x9f4ec9f, 0x9f9764f, 0x9fe964f, 0xa27a2ed, 0xa2c7a2d, 0xa2d8fc2, + 0xa348e34, 0xa3e3473, 0xa73797d, 0xa73cacd, 0xa73e787, 0xa73eaba, + 0xa78e785, 0xa7d4bcb, 0xa7ef4ef, 0xab7eab5, 0xabeab51, 0xabeab95, + 0xae349e3, 0xaef41ef, 0xaefe8f5, 0xaf53959, 0xafe84f8, 0xb236298, + 0xb238b2f, 0xb238cb2, 0xb41abcb, 0xb41cbec, 0xb84acbc, 0xb84cbec, + 0xb8a2bf2, 0xb8cb2ec, 0xb8cba3e, 0xb8cbec5, 0xb8d3acb, 0xb8fec3e, + 0xbc5318b, 0xbcb5316, 0xbcbec95, 0xbd73ebd, 0xbd7bdf5, 0xbdb3df1, + 0xbdb3ed1, 0xbdb7ed5, 0xbdbed51, 0xbdbed95, 0xc23d9c2, 0xc24179c, + 0xc2419ec, 0xc2439c2, 0xc249c2a, 0xc2ec7d4, 0xc51ebce, 
0xc534b73, + 0xc92cd42, 0xc9ac2cd, 0xc9b2c32, 0xc9bce2c, 0xc9c2fca, 0xc9e3bce, + 0xc9ed4bc, 0xca37943, 0xcb2179c, 0xcbec3e1, 0xcbec73e, 0xcbec7e5, + 0xcdf84cf, 0xcdfc4f1, 0xcdfc8f5, 0xce2c417, 0xce73ced, 0xce7d4bc, + 0xce7db2c, 0xcec3ed1, 0xcec7d4f, 0xcecf3e1, 0xcecf73e, 0xcfca4f1, + 0xcfca84f, 0xcfca8f5, 0xcfec4f1, 0xcfec84f, 0xd3b8d3f, 0xd3bdf73, + 0xd3bf9e3, 0xd428498, 0xd4b41cb, 0xd7bd74f, 0xd7fc537, 0xd8498c9, + 0xdb28b2f, 0xe3186ce, 0xe34c9e3, 0xe34ce31, 0xe34ce73, 0xe3ced84, + 0xe73ce87, 0xe78a72c, 0xe78ce75, 0xe963e43, 0xe96de3e, 0xec54384, + 0xec9d3e3, 0xecd7e57, 0xecf8d3e, 0xefb84ef, 0xefbef51, 0xefe4f16, + 0xefe4f21, 0xefe84f6, 0xefe8f56, 0xefeb4f1, 0xf5318b8, 0xf8e5248, + 0xf8ecf85, 0xf8ef5b8, 0xf97c92c, 0xf9e39e6, 0xfbefb95, 0xfc239c2, + 0xfe7ce75, 0x124914712, 0x127128e78, 0x128d812ed, 0x129e8912e, + 0x12f8ef812, 0x1315813b8, 0x1319143c9, 0x132b9123b, 0x1361b613f, + 0x1361c613d, 0x1361c6143, 0x1361fc613, 0x136813c68, 0x136cb6136, + 0x1396b6136, 0x1461c6a14, 0x1467c6147, 0x14712e714, 0x1476e7147, + 0x14914c9ca, 0x14e714ce7, 0x168a6ea68, 0x16c6d4164, 0x16c868cec, + 0x176be76b7, 0x179f97bf9, 0x17a7ca787, 0x17d7bd7cb, 0x18131b8cb, + 0x1898be98b, 0x18b8abeab, 0x18ced8ced, 0x18dad8cad, 0x18fb318b3, + 0x18fc78fc8, 0x191319c9d, 0x1913a31ba, 0x191419ae9, 0x1914b34b1, + 0x191d3bd31, 0x1941479c9, 0x194149ec9, 0x196be969e, 0x1983138b8, + 0x1983139c9, 0x19cd419c4, 0x1a7e71417, 0x1c979bc97, 0x1cfd7fdcf, + 0x1ebfe7bfe, 0x1f91319c9, 0x214717c24, 0x214aea24a, 0x217b2172f, + 0x217cb2172, 0x217fc2127, 0x2183c2183, 0x23258d38d, 0x2328d38df, + 0x2329e389e, 0x2329e39fe, 0x232de8d3e, 0x232e349e3, 0x232e39e3d, + 0x2378c2378, 0x23d8c23d3, 0x242a9f49f, 0x242c2ec84, 0x24329f49f, + 0x248a2ea24, 0x248e5e254, 0x24d427d4f, 0x24d47dc24, 0x254e527e5, + 0x25df85f25, 0x25f52a8f5, 0x26239623d, 0x2623f9623, 0x26249624a, + 0x26286232f, 0x2628d86ed, 0x262962d42, 0x26298e96e, 0x262e7e876, + 0x262f8ef86, 0x26b96b276, 0x278425784, 0x27952795d, 0x279528795, + 0x27a27a87f, 0x27a27a97d, 0x27a27a987, 0x27a27af97, 0x27a2e7a87, + 0x27a2eab7a, 0x2842784f2, 0x286239862, 0x28952a895, 0x289532895, + 0x28b278b98, 0x28b28ba98, 0x28b28db98, 0x28b2db8ed, 0x2952d8952, + 0x296243962, 0x296b23962, 0x2989e5925, 0x2b238b298, 0x2c1287128, + 0x2c1424cec, 0x2c212712d, 0x2c212812a, 0x2c212d812, 0x2c6286cec, + 0x2c7842784, 0x2ca27a287, 0x2d425d7d4, 0x2d4284ded, 0x2d42f49f4, + 0x2e527e587, 0x2e52e7e5d, 0x2e52e7fe5, 0x2e52fe8f5, 0x2ea2462ea, + 0x2ea6826ea, 0x2ef4824ef, 0x2f532f8f5, 0x2f9527952, 0x3168e3183, + 0x318be3183, 0x3218e3183, 0x327349734, 0x32be32b73, 0x32d38d398, + 0x3436cf4cf, 0x3438498c9, 0x3439bf49f, 0x3439f49f6, 0x3439f4c9f, + 0x343cf41cf, 0x343f84cf8, 0x34be34737, 0x35378c537, 0x353dc5373, + 0x35483c548, 0x35c813581, 0x35f536bf5, 0x35f538f56, 0x35f53bf51, + 0x36a3696ad, 0x36a3696ba, 0x36a36986a, 0x36a36f96a, 0x36a3c686a, + 0x36a68f36a, 0x36aca34a3, 0x37367b67f, 0x373687c68, 0x3736c67d6, + 0x37387b8cb, 0x373b67cb6, 0x373b967b6, 0x373fc67c6, 0x3793bd7bd, + 0x389538956, 0x38ba38baf, 0x38d38d986, 0x38d38db98, 0x38d3a8d98, + 0x38d3b8dcb, 0x397349736, 0x39734a973, 0x3979a73ba, 0x39895c935, + 0x39ca389ca, 0x39ca39cad, 0x39ca39fca, 0x3a31ca343, 0x3a348ca34, + 0x3a783ca78, 0x3ac8d3a8d, 0x3b2e328b2, 0x3b4375b43, 0x3b437b43f, + 0x3b459b435, 0x3b8bacb3a, 0x3bc538bc5, 0x3bc53b6c5, 0x3bc53bc51, + 0x3bd31bdcb, 0x3bd73bdcb, 0x3c537bc53, 0x3c537c543, 0x3c537cf53, + 0x3c9cacb3a, 0x3ca349ca3, 0x3ca34ca73, 0x3cbc5c935, 0x3cf53c8f5, + 0x3d36bd3cb, 0x3e213b23b, 0x3e318a313, 0x3e3b2b632, 0x3e73b67b6, + 0x3ea6a386a, 0x3eba8ba3a, 0x3f53fb8f5, 
0x3f53fbf95, 0x414914e96, + 0x41714b4f1, 0x4171b41cb, 0x436b4696b, 0x437b437cb, 0x438468c68, + 0x439cb49cb, 0x452e52495, 0x454797c95, 0x4548ae548, 0x4548e5486, + 0x454ac7ac5, 0x454be541b, 0x454c5484a, 0x454c6c576, 0x454e546b4, + 0x454ec7e57, 0x462ce42c4, 0x46ad4cadc, 0x46b4696ba, 0x46b4abeab, + 0x46b4cb676, 0x46b4d696b, 0x46ced4ced, 0x47145b471, 0x4715c4571, + 0x47abc4b7a, 0x47d4797d6, 0x47d479a7d, 0x47d497dc9, 0x47d4a7cad, + 0x48467c687, 0x48478be78, 0x48478e786, 0x484c68c6a, 0x484dc68c6, + 0x484e78ce7, 0x484e78ea7, 0x49cb4d9cb, 0x49ed49ed6, 0x49ed4c9ed, + 0x4a34f49f4, 0x4ad41acad, 0x4ad84acad, 0x4aed49ed4, 0x4b41abeab, + 0x4b84eabea, 0x4bc7d74db, 0x4c2462c76, 0x4c249c2ec, 0x4c548ec54, + 0x4c9cbc4ba, 0x4ced41ced, 0x4ced84ced, 0x4db7d497d, 0x4e54b8e54, + 0x4ece7de4d, 0x51361b613, 0x5181ae51e, 0x5181ec51e, 0x518c515ca, + 0x51d81c51c, 0x5217b2172, 0x521b2e52e, 0x526286232, 0x52a27a287, + 0x52be5295e, 0x537367b67, 0x538ba38ba, 0x53a3595ba, 0x53a6a386a, + 0x562b25e52, 0x5715c571d, 0x5715c5871, 0x5815e5281, 0x5815e5816, + 0x58da2da8d, 0x592593b23, 0x59d3bd359, 0x5b25e58b2, 0x5b7db27db, + 0x5d45c5484, 0x62c868232, 0x67cfc674f, 0x68a6ea685, 0x69b674b69, + 0x6bfbefb3e, 0x6c5e86ce8, 0x6c686ec3e, 0x6cfdfc3df, 0x719417b41, + 0x71f5c1571, 0x7349734c9, 0x7387cf8cf, 0x739f97bf9, 0x73bc97bc9, + 0x76be76b75, 0x787489248, 0x787cf8cf5, 0x787f4cf84, 0x79f97bf95, + 0x7a72c42a4, 0x7a7ca7875, 0x7a7cfca4f, 0x7bd74debd, 0x7bd7bd2bf, + 0x7bd7bd2cb, 0x7bd7bdcb5, 0x7c97bc957, 0x7ce7bce2c, 0x7ce7ced2c, + 0x7ce7cfe2c, 0x7ce7fec4f, 0x7e78ce72c, 0x7f97c94f4, 0x8468c68ec, + 0x8498be98b, 0x87f8cf82c, 0x898a35935, 0x898c92c32, 0x8ad82adf2, + 0x8ad8a2aed, 0x8ad8a2cad, 0x8adac58ad, 0x8b28efbef, 0x8b2eae8ba, + 0x8b8abeab5, 0x8b8cbec3e, 0x8ced8ced5, 0x8cf8cf2ca, 0x8cf8cfe2c, + 0x8cf8dcf2c, 0x8cf8ecf3e, 0x8d3cf8dcf, 0x8d89c3d89, 0x8e318ce31, + 0x8e98be985, 0x8eced3e8d, 0x8ef86ef3e, 0x8efb8ef3e, 0x8f8aef83e, + 0x95e6b69e6, 0x96b6e963e, 0x98ae9a2ae, 0x98be98b3e, 0x9adf93df3, + 0x9ae93eaba, 0x9ae9a2aed, 0x9ae9fa2ae, 0x9bde39ebd, 0x9bf92bf32, + 0x9bf9b2bfa, 0x9bf9b2dbf, 0x9bf9db3df, 0x9c9bce4bc, 0x9df96d3df, + 0x9dfc9d3df, 0x9e3bfb9fe, 0x9febf92bf, 0xa2eab9ea2, 0xa35f538f5, + 0xb6296be96, 0xbcb9c279c, 0xbefb9f4ef, 0xbefbef3e1, 0xbefbef73e, + 0xbefbef7e5, 0xcdfc7df57, 0xcdfcd73df, 0xcdfcd7d4f, 0xcdfd31cdf, + 0xce348ce34, 0xcfc238fc2, 0xd427d4ed7, 0xea72ae42a, 0xf97bf94fb, + 0x1241914e912, 0x131813f8cf8, 0x1319f913bf9, 0x131c91bc913, + 0x14161c614ec, 0x1461a6ea614, 0x147a71ca714, 0x14b714be714, + 0x2181a2ea218, 0x21c21812ece, 0x232be32b9e3, 0x232d3fd9fd3, + 0x232ef8fe3fe, 0x252be5257e5, 0x258f5278f52, 0x25e52de8de5, + 0x25fdf527df5, 0x262b76e76b6, 0x28b298be98b, 0x2a2da8da298, + 0x2b87eb872b8, 0x31e31b613b6, 0x32739732b23, 0x3437f4cf437, + 0x34b43e349e3, 0x359538953b8, 0x35f537bf537, 0x36a96a3a439, + 0x36c686c53c5, 0x373467c6734, 0x395369b6953, 0x3aba38ba398, + 0x3ba3fb9fba3, 0x3cfca38fca3, 0x3d38d368c68, 0x3d3b6bd396b, + 0x3d3cbd9cbd3, 0x3e326238623, 0x4171f4fcf41, 0x42c249c279c, + 0x42d427d497d, 0x45295795245, 0x48427842e78, 0x48468a6ea68, + 0x4847a78ca78, 0x4c24ec27ec2, 0x4c9acad4adc, 0x4d7d467c67d, + 0x4da6ad496ad, 0x516be51615e, 0x51e51815eb8, 0x526826e525e, + 0x535c538d38d, 0x59e545eb459, 0x5c54d45c7d4, 0x5c58457845c, + 0x67962462967, 0x69eb69e4b69, 0x715f517bf51, 0x71c517bc517, + 0x7a7eba4baea, 0x7d7fdcfd72c, 0x7ecb4ecb7ec, 0x83a318ca318, + 0x842984e9842, 0x8ded8ced82c, 0x8f8eaef82ae, 0x91721b21791, + 0x9379437b437, 0x97f97bf92bf, 0x9c97bc974bc, 0x9fdadf92adf, + 0xa24ea249ea2, 0xa2a79a72bab, 0xbd4ed9ed4bd, 0xd7db2db7ede, + 
0xef94f24f9ef, 0xf4bfe7febf4, + }; + + map[22] = { + 0x1a, 0x1232, 0x1676, 0x1bdb, 0x1cec, 0x2362, 0x262a, 0x3273, 0x373a, + 0x484a, 0x595a, 0x6276, 0x7367, 0x121712, 0x124d42, 0x125e52, + 0x131613, 0x134b43, 0x135c53, 0x168d86, 0x169e96, 0x178b87, 0x179c97, + 0x1bfefb, 0x1cfdfc, 0x232484, 0x232595, 0x238b28, 0x239c29, 0x24d462, + 0x25e562, 0x262bdb, 0x262cec, 0x27a27a, 0x28b2a8, 0x29c2a9, 0x328d38, + 0x329e39, 0x34b473, 0x35c573, 0x36a36a, 0x373bdb, 0x373cec, 0x38d3a8, + 0x39e3a9, 0x467684, 0x46b46a, 0x46b476, 0x47d467, 0x47d47a, 0x484cec, + 0x48bdb4, 0x4b6db4, 0x4d7bd4, 0x4f9f4a, 0x567695, 0x56c56a, 0x56c576, + 0x57e567, 0x57e57a, 0x595bdb, 0x59cec5, 0x5c6ec5, 0x5e7ce5, 0x5f8f5a, + 0x628d86, 0x629e96, 0x738b87, 0x739c97, 0x8b2db8, 0x8d3bd8, 0x9c2ec9, + 0x9e3ce9, 0xb4384b, 0xb8478b, 0xc5395c, 0xc9579c, 0xd4284d, 0xd8468d, + 0xe5295e, 0xe9569e, 0x1218d812, 0x1219e912, 0x124efe42, 0x125dfd52, + 0x1318b813, 0x1319c913, 0x134cfc43, 0x135bfb53, 0x1614d416, 0x1615e516, + 0x168efe86, 0x169dfd96, 0x1714b417, 0x1715c517, 0x178cfc87, 0x179bfb97, + 0x1b45e54b, 0x1b89e98b, 0x1c54d45c, 0x1c98d89c, 0x23249f94, 0x23258f85, + 0x238cfc28, 0x239bfb29, 0x24d42959, 0x24d49c29, 0x24efe462, 0x25dfd562, + 0x25e52848, 0x25e58b28, 0x262befeb, 0x262cdfdc, 0x28b82cec, 0x28dad82a, + 0x28fcf82a, 0x29c92bdb, 0x29eae92a, 0x29fbf92a, 0x328efe38, 0x329dfd39, + 0x34b43959, 0x34b49e39, 0x34cfc473, 0x35bfb573, 0x35c53848, 0x35c58d38, + 0x373dcfcd, 0x373ebfbe, 0x38bab83a, 0x38d83ece, 0x38fef83a, 0x39cac93a, + 0x39e93dbd, 0x39fdf93a, 0x42d427d4, 0x43b436b4, 0x46769f94, 0x469e9684, + 0x46ad46a6, 0x46b49e96, 0x46b6cec4, 0x46cfc6a4, 0x479c9784, 0x47ab47a7, + 0x47d49c97, 0x47d7ece4, 0x47efe7a4, 0x484befbe, 0x484cfdfc, 0x49cb4ec9, + 0x49ed4ce9, 0x49f49cec, 0x49f4bdb9, 0x4abc9cb4, 0x4ade9ed4, 0x4b6efb4e, + 0x4bc9cdb4, 0x4c9f479c, 0x4cfd9f4c, 0x4d7cfd4c, 0x4db9ed49, 0x4e9f469e, + 0x4efb9f4e, 0x4f6cf476, 0x4f7ef467, 0x4fc6ecf4, 0x4fe7cef4, 0x52e527e5, + 0x53c536c5, 0x56768f85, 0x568d8695, 0x56ae56a6, 0x56bfb6a5, 0x56c58d86, + 0x56c6bdb5, 0x578b8795, 0x57ac57a7, 0x57dfd7a5, 0x57e58b87, 0x57e7dbd5, + 0x58bc5db8, 0x58de5bd8, 0x58f58bdb, 0x58f5cec8, 0x595bfefb, 0x595cdfcd, + 0x5acb8bc5, 0x5aed8de5, 0x5b8f578b, 0x5bfe8f5b, 0x5c6dfc5d, 0x5cb8bec5, + 0x5d8f568d, 0x5dfc8f5d, 0x5e7bfe5b, 0x5ec8de58, 0x5f6bf576, 0x5f7df567, + 0x5fb6dbf5, 0x5fd7bdf5, 0x628ef86e, 0x629df96d, 0x738cf87c, 0x739bf97b, + 0x86d863d8, 0x87b872b8, 0x8b2efb8e, 0x8c5f835c, 0x8d3cfd8c, 0x8e5f825e, + 0x8fc2ecf8, 0x8fe3cef8, 0x96e963e9, 0x97c972c9, 0x9b4f934b, 0x9c2dfc9d, + 0x9d4f924d, 0x9e3bfe9b, 0x9fb2dbf9, 0x9fd3bdf9, 0xb45e584b, 0xb849e98b, + 0xbe54b95e, 0xbe98b59e, 0xbf5395fb, 0xbf9579fb, 0xc54d495c, 0xc958d89c, + 0xcd45c84d, 0xcd89c48d, 0xcf4384fc, 0xcf8478fc, 0xdf5295fd, 0xdf9569fd, + 0xef4284fe, 0xef8468fe, 0x1218ef812e, 0x1219df912d, 0x1318cf813c, + 0x1319bf913b, 0x1614ef416e, 0x1615df516d, 0x1714cf417c, 0x1715bf517b, + 0x1b419e914b, 0x1b815e518b, 0x1c518d815c, 0x1c914d419c, 0x2428427842, + 0x2529527952, 0x2a28fea2f8, 0x2a29fda2f9, 0x2b2db27db2, 0x2c2ec27ec2, + 0x3438436843, 0x3539536953, 0x3a38fca3f8, 0x3a39fba3f9, 0x3bd3bd6bd3, + 0x3ce3ce6ce3, 0x42d42e9ed4, 0x43b43c9cb4, 0x49cb4797c9, 0x49ed4696e9, + 0x4a6aef4ea6, 0x4a7acf4ca7, 0x4ab9eab4e9, 0x4ad9cad4c9, 0x4b45e546b4, + 0x4d45c547d4, 0x4fbefb7ef4, 0x4fc6dfcdf4, 0x52e52d8de5, 0x53c53b8bc5, + 0x58bc5787b8, 0x58de5686d8, 0x5a6adf5da6, 0x5a7abf5ba7, 0x5ac8dac5d8, + 0x5ae8bae5b8, 0x5c54d456c5, 0x5e54b457e5, 0x5fb6efbef5, 0x5fcdfc7df5, + 0x8b289e98b8, 0x8d389c98d8, 0x8fbefb3ef8, 0x8fc2dfcdf8, 0x9c298d89c9, + 
0x9e398b89e9, 0x9fb2efbef9, 0x9fcdfc3df9, 0xbf49f479fb, 0xbf538f58fb, + 0xcf439f49fc, 0xcf58f578fc, 0xdf49f469fd, 0xdf528f58fd, 0xef429f49fe, + 0xef58f568fe, 0x2428429e9842, 0x2529528d8952, 0x2b29edbed2b9, + 0x2c28decde2c8, 0x3438439c9843, 0x3539538b8953, 0x3d39cbdcb3d9, + 0x3e38bcebc3e8, 0x42f4ef427ef4, 0x43f4cf436cf4, 0x4846845e5486, + 0x4847845c5487, 0x4b7ecb47e7ce, 0x4ced6ced46c6, 0x52f5df527df5, + 0x53f5bf536bf5, 0x5956954d4596, 0x5957954b4597, 0x5bde6bde56b6, + 0x5c7dbc57d7bd, 0x86f8ef863ef8, 0x87f8cf872cf8, 0x96f9df963df9, + 0x97f9bf972bf9, + }; + + map[32] = { + 0x16d, 0x21d, 0x62d, 0x16343, 0x16787, 0x16efe, 0x178a7, 0x187b8, + 0x19c9d, 0x1a7ba, 0x1aba6, 0x1b8ab, 0x21343, 0x21aba, 0x21efe, 0x23473, + 0x24384, 0x2595d, 0x27387, 0x27871, 0x28478, 0x3186a, 0x34362, 0x346a3, + 0x34736, 0x34a31, 0x34a73, 0x373d6, 0x3a31d, 0x3a73d, 0x3ad84, 0x4176b, + 0x436b4, 0x43846, 0x43b41, 0x43b84, 0x484d6, 0x4b41d, 0x4b84d, 0x4bd73, + 0x56c5d, 0x595d6, 0x5c51d, 0x5c95d, 0x62787, 0x62aba, 0x62efe, 0x6a3ba, + 0x6b4ab, 0x73a87, 0x73db8, 0x76b23, 0x78376, 0x78a72, 0x7a27d, 0x84b78, + 0x84da7, 0x86a24, 0x87486, 0x87b82, 0x8b28d, 0x9c2d9, 0xa2417, 0xa31ba, + 0xa73ba, 0xa7d4b, 0xab7a2, 0xb2318, 0xb41ab, 0xb84ab, 0xb8d3a, 0xba8b2, + 0xd17a7, 0xd18b8, 0xd2373, 0xd2484, 0xd36a3, 0xd46b4, + 0x1318163, 0x13439c9, 0x13813b8, 0x1417164, 0x14714a7, 0x1635f53, + 0x1645e54, 0x178ce7c, 0x179f9a7, 0x17d7bd7, 0x187cf8c, 0x189e9b8, + 0x18d8ad8, 0x19ae9ba, 0x19bf9ab, 0x19eae9d, 0x19eafe9, 0x19fbef9, + 0x19fbf9d, 0x1a686a6, 0x1a7aefe, 0x1a7cfca, 0x1acfca6, 0x1b676b6, + 0x1b8bfef, 0x1b8cecb, 0x1bcecb6, 0x1c9787c, 0x1c9caba, 0x1c9cefe, + 0x1ce7fec, 0x1cf8efc, 0x1d7ece7, 0x1d8fcf8, 0x1e9896e, 0x1e98c9e, + 0x1ec9bce, 0x1f9796f, 0x1f97c9f, 0x1fc9acf, 0x2132b23, 0x2142a24, + 0x21acfca, 0x21bcecb, 0x232b273, 0x2349e39, 0x235f573, 0x23d38d3, + 0x242a284, 0x2439f49, 0x245e584, 0x24d47d4, 0x257e587, 0x258f578, + 0x2595aba, 0x25e7e5d, 0x25e7fe5, 0x25f8ef5, 0x25f8f5d, 0x2714171, + 0x2737efe, 0x2739f97, 0x279f971, 0x2813181, 0x2848fef, 0x2849e98, + 0x289e981, 0x2953439, 0x2959787, 0x2959efe, 0x29e3fe9, 0x29f4ef9, + 0x2d3e9e3, 0x2d4f9f4, 0x2e5451e, 0x2e5495e, 0x2e9589e, 0x2f5351f, + 0x2f5395f, 0x2f9579f, 0x3181a31, 0x31f96af, 0x3435956, 0x343c95c, + 0x343c9c2, 0x346ce3c, 0x34739c9, 0x348a348, 0x349ca39, 0x349e396, + 0x349e3c9, 0x34a3595, 0x34ae9e3, 0x34ce31c, 0x34ce73c, 0x35f5362, + 0x35f56a3, 0x35f5736, 0x35f5a73, 0x362b232, 0x36a3efe, 0x37387b8, + 0x3763fef, 0x39ca3d9, 0x39e3d96, 0x39e3dc9, 0x3a3595d, 0x3a5f531, + 0x3a73fef, 0x3b238b2, 0x3ce3d1c, 0x3ce73dc, 0x3cecd84, 0x3d36bd3, + 0x3d38d36, 0x3d3bd31, 0x3db8d3d, 0x3dbd73d, 0x4171b41, 0x41e96be, + 0x436cf4c, 0x437b437, 0x43849c9, 0x439cb49, 0x439f496, 0x439f4c9, + 0x43b4595, 0x43bf9f4, 0x43cf41c, 0x43cf84c, 0x45e5462, 0x45e56b4, + 0x45e5846, 0x45e5b84, 0x462a242, 0x46b4fef, 0x48478a7, 0x4864efe, + 0x49cb4d9, 0x49f4d96, 0x49f4dc9, 0x4a247a2, 0x4b4595d, 0x4b5e541, + 0x4b84efe, 0x4cf4d1c, 0x4cf84dc, 0x4cfcd73, 0x4d46ad4, 0x4d47d46, + 0x4d4ad41, 0x4da7d4d, 0x4dad84d, 0x56ae5ba, 0x56bf5ab, 0x56c5343, + 0x56c5787, 0x56c5aba, 0x56c5efe, 0x5787956, 0x57ac587, 0x57e576d, + 0x57e5876, 0x57e5a7d, 0x57e5a87, 0x57e5db8, 0x58bc578, 0x58f5786, + 0x58f586d, 0x58f5b78, 0x58f5b8d, 0x58f5da7, 0x59578a7, 0x59587b8, + 0x595a7ad, 0x595a7ba, 0x595aba6, 0x595b8ab, 0x595b8bd, 0x5a7ac5d, + 0x5a7df5b, 0x5a7e5ba, 0x5ab7ac5, 0x5ae51ad, 0x5ae51ba, 0x5ae95ad, + 0x5ae95ba, 0x5b8bc5d, 0x5b8de5a, 0x5b8f5ab, 0x5ba8bc5, 0x5bf51ab, + 0x5bf51bd, 0x5bf95ab, 0x5bf95bd, 0x5c53473, 0x5c54384, 0x5c57387, + 0x5c58478, 
0x5c78795, 0x5c7e587, 0x5c8f578, 0x5c95aba, 0x5c95efe, + 0x5ce7e5d, 0x5ce7fe5, 0x5cf8ef5, 0x5cf8f5d, 0x5e6ae5d, 0x5ea7fe5, + 0x5ead8f5, 0x5ef6ae5, 0x5ef7e56, 0x5efae51, 0x5efae95, 0x5f6bf5d, + 0x5fb8ef5, 0x5fbd7e5, 0x5fe6bf5, 0x5fe8f56, 0x5febf51, 0x5febf95, + 0x6279f97, 0x6289e98, 0x62a686a, 0x62b676b, 0x67b674b, 0x68a683a, + 0x6a3cfca, 0x6b4cecb, 0x6ce3ecd, 0x6ce3fec, 0x6cf4efc, 0x6cf4fcd, + 0x6e5c45e, 0x6ecb5ce, 0x6ecbc2e, 0x6f5c35f, 0x6fca5cf, 0x6fcac2f, + 0x7367b67, 0x7379c9d, 0x73879c9, 0x739f976, 0x73a9f97, 0x76fc23f, + 0x783ece7, 0x7875c51, 0x7879c92, 0x78ce7c2, 0x79f9a72, 0x7a27fef, + 0x7a7ba4b, 0x7c537dc, 0x7ce7dc2, 0x7d4bd7d, 0x7d7bd72, 0x8468a68, + 0x84789c9, 0x8489c9d, 0x849e986, 0x84b9e98, 0x86ec24e, 0x874fcf8, + 0x87cf8c2, 0x89e9b82, 0x8b28efe, 0x8b8ab3a, 0x8c548dc, 0x8cf8dc2, + 0x8d3ad8d, 0x8d8ad82, 0x93adf49, 0x94bde39, 0x9a359ba, 0x9abac92, + 0x9ae93ad, 0x9ae93ba, 0x9ae9a2d, 0x9ae9ba2, 0x9ae9d4b, 0x9b459ab, + 0x9bf94ab, 0x9bf94bd, 0x9bf9ab2, 0x9bf9b2d, 0x9bf9d3a, 0x9c9ab3a, + 0x9c9ba4b, 0x9e3afe9, 0x9e3dbf9, 0x9ef3e96, 0x9ef3ec9, 0x9efae92, + 0x9f4bef9, 0x9f4dae9, 0x9fe4f96, 0x9fe4fc9, 0x9febf92, 0xa2f517f, + 0xa31afef, 0xa31cfca, 0xa73cfca, 0xa7cfca2, 0xabac5c1, 0xb2e518e, + 0xb41befe, 0xb41cecb, 0xb84cecb, 0xb8cecb2, 0xc3435c1, 0xc73df8c, + 0xc84de7c, 0xce73fec, 0xce7d4fc, 0xcef3ec1, 0xcef7ec2, 0xcf84efc, + 0xcf8d3ec, 0xcfe4fc1, 0xcfe8fc2, 0xe5186ce, 0xe5495e6, 0xe54c95e, + 0xe5c45e1, 0xe9589e6, 0xe95c89e, 0xe96b25e, 0xe98c9e2, 0xec2419e, + 0xec51bce, 0xec95bce, 0xec9bce2, 0xefe5956, 0xefe9c92, 0xefec5c1, + 0xf5176cf, 0xf5395f6, 0xf53c95f, 0xf5c35f1, 0xf9579f6, 0xf95c79f, + 0xf96a25f, 0xf97c9f2, 0xfc2319f, 0xfc51acf, 0xfc95acf, 0xfc9acf2, + 0x13129f913, 0x13161b613, 0x131813c9c, 0x135c9bc53, 0x13813fcf8, + 0x13f913c9f, 0x14129e914, 0x14161a614, 0x141714c9c, 0x145c9ac54, + 0x14714ece7, 0x14e914c9e, 0x16715f517, 0x16815e518, 0x1715f51a7, + 0x17d7fcfd7, 0x1815e51b8, 0x18d8eced8, 0x197f97bf9, 0x198e98ae9, + 0x19edbede9, 0x19fdafdf9, 0x1a6a9f96a, 0x1b6b9e96b, 0x1ce7bcebc, + 0x1cf8acfac, 0x1e512815e, 0x1e914196e, 0x1ec6c86ce, 0x1f512715f, + 0x1f913196f, 0x1fc6c76cf, 0x2132cfc23, 0x2142cec24, 0x232b29e39, + 0x232cfc273, 0x23d3f9fd3, 0x242a29f49, 0x242cec284, 0x24d4e9ed4, + 0x253f538f5, 0x254e547e5, 0x25ed8ede5, 0x25fd7fdf5, 0x2712b2171, + 0x2812a2181, 0x29532b239, 0x29542a249, 0x29e389e89, 0x29f479f79, + 0x2a25f562a, 0x2ac9589ca, 0x2af52a95f, 0x2b25e562b, 0x2bc9579cb, + 0x2be52b95e, 0x2e52b251e, 0x2ec2642ce, 0x2f52a251f, 0x2fc2632cf, + 0x3181ce31c, 0x326286232, 0x34ece3484, 0x3589ca895, 0x359c89532, + 0x37387cf8c, 0x373f97c9f, 0x381835c51, 0x38dfcf83d, 0x39c2b2329, + 0x39efb3ef9, 0x3a19f9131, 0x3a343f9f4, 0x3a953f539, 0x3b23f9fb2, + 0x3bc5395c8, 0x3bdbe9e3d, 0x3c537f53c, 0x3cecd38d3, 0x3cf538f5c, + 0x3d36cfd3c, 0x3d39fd396, 0x3d39fd3c9, 0x3d3bd3595, 0x3d3fcfd31, + 0x3d9cb3d39, 0x3dfcfd73d, 0x3fc239c2f, 0x4171cf41c, 0x426276242, + 0x43fcf4373, 0x4579cb795, 0x459c79542, 0x471745c51, 0x47dece74d, + 0x48478ce7c, 0x484e98c9e, 0x49c2a2429, 0x49fea4fe9, 0x4a24e9ea2, + 0x4ac5495c7, 0x4adaf9f4d, 0x4b19e9141, 0x4b434e9e3, 0x4b954e549, + 0x4c548e54c, 0x4ce547e5c, 0x4cfcd47d4, 0x4d46ced4c, 0x4d49ed496, + 0x4d49ed4c9, 0x4d4ad4595, 0x4d4eced41, 0x4d9ca4d49, 0x4deced84d, + 0x4ec249c2e, 0x53f536bf5, 0x53f538f56, 0x53f53b8f5, 0x53f53bf51, + 0x54e546ae5, 0x54e547e56, 0x54e54a7e5, 0x54e54ae51, 0x5676b6c57, + 0x5686a6c58, 0x56ae5686a, 0x56bf5676b, 0x576b67956, 0x579c9bc53, + 0x579f795a7, 0x57abf57ab, 0x57dbc57d7, 0x57e567b67, 0x57e587b8b, + 0x57edb7d75, 0x586a68956, 0x589c9ac54, 0x589e895b8, 
0x58bae58ba, + 0x58dac58d8, 0x58f568a68, 0x58f578a7a, 0x58fda8d85, 0x595ada8da, + 0x595bdb7db, 0x5acfc7ac5, 0x5ada8dea5, 0x5adf51ada, 0x5adf95ada, + 0x5aeafe8f5, 0x5bcec8bc5, 0x5bdb7dfb5, 0x5bde51bdb, 0x5bde95bdb, + 0x5bfbef7e5, 0x5c53d38d3, 0x5c54d47d4, 0x5cafca8f5, 0x5cbecb7e5, + 0x5ded8de56, 0x5dedb8de5, 0x5dfd7df56, 0x5dfda7df5, 0x5e5945ae9, + 0x5e9589ae9, 0x5ed6bde5d, 0x5f5935bf9, 0x5f9579bf9, 0x5fd6adf5d, + 0x62a69f96a, 0x62b69e96b, 0x67954c597, 0x67fc675cf, 0x68953c598, + 0x68ec685ce, 0x6a369f96a, 0x6b469e96b, 0x6cafca4fc, 0x6cbecb3ec, + 0x6ec686c2e, 0x6fc676c2f, 0x736cfc676, 0x79c5bc971, 0x7cef47efc, + 0x7d74d79c9, 0x7d75cfd75, 0x7d7cfd7c2, 0x7f517c51f, 0x846cec686, + 0x89c5ac981, 0x8cfe38fec, 0x8d83d89c9, 0x8d85ced85, 0x8d8ced8c2, + 0x8e518c51e, 0x97f974bf9, 0x97f974f96, 0x97f97bf92, 0x98e983ae9, + 0x98e983e96, 0x98e98ae92, 0x9acfac93a, 0x9ae9ba4b4, 0x9aed4ada9, + 0x9bcebc94b, 0x9bf9ab3a3, 0x9bfd3bdb9, 0x9ded4bde9, 0x9dedbde92, + 0x9dfd3adf9, 0x9dfdadf92, 0x9e398c9ec, 0x9e3bcebc9, 0x9f497c9fc, + 0x9f4acfac9, 0xa725f52a2, 0xac5945ca6, 0xaf96a596f, 0xb825e52b2, + 0xbc5935cb6, 0xbe96b596e, 0xcafca4fc1, 0xcafca84fc, 0xcafca8fc2, + 0xcbecb3ec1, 0xcbecb73ec, 0xcbecb7ec2, 0x13161cfc613, + 0x1391f913bf9, 0x14161cec614, 0x1491e914ae9, 0x16a615f516a, + 0x16b615e516b, 0x17e7fe7bfe7, 0x18f8ef8aef8, 0x21712cfc217, + 0x21812cec218, 0x21a21f912af, 0x21b21e912be, 0x23e3fe38fe3, + 0x24f4ef47ef4, 0x25e52b257e5, 0x25f52a258f5, 0x32629f96232, + 0x35295895325, 0x353c51bc535, 0x3598a359535, 0x35bc5358bc5, + 0x395cb35c593, 0x3a353f538f5, 0x3c2fc238fc2, 0x3c5953895c3, + 0x3e3fe3bfe31, 0x3e3fe6bfe3e, 0x3e3febfe73e, 0x42629e96242, + 0x45295795425, 0x454c51ac545, 0x4597b459545, 0x45ac5457ac5, + 0x495ca45c594, 0x4b454e547e5, 0x4c2ec247ec2, 0x4c5954795c4, + 0x4f4ef4aef41, 0x4f4ef6aef4f, 0x4f4efaef84f, 0x535c537bc53, + 0x539589535b8, 0x53f53bf5373, 0x545c548ac54, 0x549579545a7, + 0x54e54ae5484, 0x5aca9c89ca5, 0x5bcb9c79cb5, 0x5e51815ae51, + 0x5f51715bf51, 0x62762f5267f, 0x62862e5268e, 0x67c6fc674fc, + 0x68c6ec683ec, 0x73797f97bf9, 0x73cbc97c9bc, 0x791c9bc9719, + 0x79759645979, 0x7e7fe4fea7e, 0x7e7fe74fe76, 0x84898e98ae9, + 0x84cac98c9ac, 0x891c9ac9819, 0x89859635989, 0x8f8ef3efb8f, + 0x8f8ef83ef86, 0x97c9bc9794b, 0x97f974f97a7, 0x98c9ac9893a, + 0x98e983e98b8, 0x9e396b69e96, 0x9f496a69f96, 0xa7acafca4fc, + 0xac65c45ca6c, 0xaca9c289cac, 0xaeafe8fe3ae, 0xaeafea8fea2, + 0xb8bcbecb3ec, 0xbc65c35cb6c, 0xbcb9c279cbc, 0xbfbef7ef4bf, + 0xbfbefb7efb2, 0xcafca8fca3a, 0xcbecb7ecb4b, + }; + + map[33] = { + 0x16df, 0x16ed, 0x16fe, 0x21df, 0x21ed, 0x21fe, 0x62df, 0x62ed, 0x62fe, + 0x1343f6, 0x1454e6, 0x1535d6, 0x163543, 0x163f53, 0x164e34, 0x165d45, + 0x16787f, 0x16797d, 0x167987, 0x16898e, 0x16abaf, 0x16acad, 0x16bcbe, + 0x16dbcb, 0x16eaba, 0x16fcac, 0x178fa7, 0x1796ba, 0x1798a7, 0x179a7d, + 0x17a7df, 0x17a7ed, 0x17af97, 0x17dbf9, 0x1876cb, 0x1879b8, 0x187b8f, + 0x189eb8, 0x18b8df, 0x18b8fe, 0x18be78, 0x18fce7, 0x197dc9, 0x1986ac, + 0x1987c9, 0x198c9e, 0x19c9ed, 0x19c9fe, 0x19cd89, 0x19ead8, 0x1a7afe, + 0x1a7baf, 0x1a7cad, 0x1a7cba, 0x1a7fca, 0x1ac687, 0x1acba6, 0x1ad8fc, + 0x1b8abf, 0x1b8acb, 0x1b8bed, 0x1b8cbe, 0x1b8eab, 0x1ba698, 0x1bf9ea, + 0x1c9acd, 0x1c9bac, 0x1c9bce, 0x1c9cdf, 0x1c9dbc, 0x1cb679, 0x1ce7db, + 0x1d8986, 0x1d89b8, 0x1db8cb, 0x1e7876, 0x1e78a7, 0x1ea7ba, 0x1f9796, + 0x1f97c9, 0x1fc9ac, 0x21343f, 0x21353d, 0x213543, 0x21454e, 0x21787f, + 0x21797d, 0x21898e, 0x21abaf, 0x21acba, 0x21afca, 0x21bcbe, 0x21beab, + 0x21cacd, 0x21cdbc, 0x21d898, 0x21e787, 0x21f979, 0x234f73, 0x235187, + 0x235473, 0x23573d, 
0x2373df, 0x2373ed, 0x237f53, 0x23d8f5, 0x243198, + 0x243584, 0x24384f, 0x245e84, 0x2484df, 0x2484fe, 0x248e34, 0x24f9e3, + 0x253d95, 0x254179, 0x254395, 0x25495e, 0x2595ed, 0x2595fe, 0x259d45, + 0x25e7d4, 0x2737fe, 0x27387f, 0x27397d, 0x273987, 0x273f97, 0x279143, + 0x279871, 0x27d4f9, 0x28478f, 0x284798, 0x2848ed, 0x28498e, 0x284e78, + 0x287154, 0x28f5e7, 0x29579d, 0x295879, 0x29589e, 0x2959df, 0x295d89, + 0x298135, 0x29e3d8, 0x2d4541, 0x2d4584, 0x2d8498, 0x2e3431, 0x2e3473, + 0x2e7387, 0x2f5351, 0x2f5395, 0x2f9579, 0x3186af, 0x3196ad, 0x3196ba, + 0x3196db, 0x31986a, 0x319ad8, 0x319b8a, 0x31dbf9, 0x31f96a, 0x34f362, + 0x34f6a3, 0x34f736, 0x34fa31, 0x34fa73, 0x3516ba, 0x35186a, 0x351876, + 0x3518a7, 0x351db8, 0x3521ba, 0x354362, 0x3546a3, 0x354a31, 0x354a73, + 0x3562ba, 0x356a3d, 0x3573d6, 0x35a31d, 0x35a73d, 0x35ad84, 0x36a3df, + 0x36a3ed, 0x36a3fe, 0x3763fe, 0x376543, 0x376f53, 0x3956ba, 0x39586a, + 0x395ba8, 0x3a31df, 0x3a73ed, 0x3a73fe, 0x3a7f53, 0x3ad84f, 0x3ad8f5, + 0x3adf95, 0x3aed95, 0x3af531, 0x3afe95, 0x3ba8f5, 0x3baf95, 0x3d6bf5, + 0x3d86f5, 0x3db8f5, 0x3db985, 0x3dbf51, 0x3dbf95, 0x3f5362, 0x4176bf, + 0x4176cb, 0x4176fc, 0x41796b, 0x417bf9, 0x417c9b, 0x4196be, 0x41e76b, + 0x41fce7, 0x4316cb, 0x43196b, 0x431986, 0x4319b8, 0x431fc9, 0x4321cb, + 0x4356b4, 0x435b41, 0x435b84, 0x4362cb, 0x436b4f, 0x4384f6, 0x43b41f, + 0x43b84f, 0x43bf95, 0x45e462, 0x45e6b4, 0x45e846, 0x45eb41, 0x45eb84, + 0x46b4df, 0x46b4ed, 0x46b4fe, 0x4736cb, 0x47396b, 0x473cb9, 0x486354, + 0x4864ed, 0x486e34, 0x4b41fe, 0x4b84df, 0x4b84ed, 0x4b8e34, 0x4bdf73, + 0x4be341, 0x4bed73, 0x4bf95e, 0x4bf9e3, 0x4bfe73, 0x4cb9e3, 0x4cbe73, + 0x4e3462, 0x4f6ce3, 0x4f96e3, 0x4fc793, 0x4fc9e3, 0x4fce31, 0x4fce73, + 0x5176cd, 0x5186ac, 0x5186ce, 0x5186ea, 0x51876c, 0x518a7c, 0x518ce7, + 0x51d86c, 0x51ead8, 0x53d562, 0x53d6c5, 0x53d956, 0x53dc51, 0x53dc95, + 0x5416ac, 0x54176c, 0x541796, 0x5417c9, 0x541ea7, 0x5421ac, 0x5436c5, + 0x543c51, 0x543c95, 0x5462ac, 0x546c5e, 0x5495e6, 0x54c51e, 0x54c95e, + 0x54ce73, 0x56c5df, 0x56c5ed, 0x56c5fe, 0x5846ac, 0x58476c, 0x584ac7, + 0x596435, 0x5965df, 0x596d45, 0x5ac7d4, 0x5acd84, 0x5c51ed, 0x5c95df, + 0x5c95fe, 0x5c9d45, 0x5cd451, 0x5cdf84, 0x5ce73d, 0x5ce7d4, 0x5ced84, + 0x5cfe84, 0x5d4562, 0x5e6ad4, 0x5e76d4, 0x5ea7d4, 0x5ea874, 0x5ead41, + 0x5ead84, 0x62787f, 0x627987, 0x627f97, 0x62898e, 0x628e78, 0x62979d, + 0x629d89, 0x62abaf, 0x62acad, 0x62acba, 0x62bcbe, 0x63af53, 0x64be34, + 0x65cd45, 0x6a3baf, 0x6a3cad, 0x6a3cba, 0x6a3fca, 0x6ac243, 0x6ad4fc, + 0x6b4abf, 0x6b4acb, 0x6b4cbe, 0x6b4eab, 0x6ba254, 0x6bf5ea, 0x6c5acd, + 0x6c5bac, 0x6c5bce, 0x6c5dbc, 0x6cb235, 0x6ce3db, 0x6d4b54, 0x6dbc4b, + 0x6dbcb2, 0x6e3a43, 0x6eab3a, 0x6eaba2, 0x6f5c35, 0x6fca5c, 0x6fcac2, + 0x7376df, 0x73876f, 0x7396ba, 0x7397d6, 0x739876, 0x73a97d, 0x73a987, + 0x73af97, 0x73d6cb, 0x73db8f, 0x73db98, 0x73dbf9, 0x73dfc9, 0x73edc9, + 0x73f976, 0x73fec9, 0x743bf9, 0x76b23f, 0x76c23d, 0x76c243, 0x76c2d4, + 0x76cb23, 0x76d4fc, 0x76fc23, 0x78f3a7, 0x78fa72, 0x791643, 0x796243, + 0x796b23, 0x796ba2, 0x796d4b, 0x79a7d2, 0x7a27fe, 0x7a2987, 0x7a2f97, + 0x7c9243, 0x7c9b23, 0x7d4bf9, 0x7d4cb9, 0x7d4f96, 0x7d4fc9, 0x7db2f9, + 0x8476cb, 0x8478f6, 0x847986, 0x8486fe, 0x84986e, 0x84b78f, 0x84b798, + 0x84be78, 0x84dfa7, 0x84e786, 0x84eda7, 0x84f6ac, 0x84fc79, 0x84fc9e, + 0x84fce7, 0x84fea7, 0x854ce7, 0x86a24f, 0x86a254, 0x86a2f5, 0x86ac24, + 0x86c24e, 0x86ea24, 0x86f5ea, 0x871654, 0x876254, 0x876c24, 0x876cb2, + 0x876f5c, 0x87b8f2, 0x89e4b8, 0x89eb82, 0x8a7254, 0x8a7c24, 0x8b2798, + 0x8b28ed, 0x8b2e78, 0x8f5ac7, 0x8f5ce7, 
0x8f5e76, 0x8f5ea7, 0x8fc2e7, + 0x935ad8, 0x95796d, 0x9586ac, 0x958796, 0x9589e6, 0x9596ed, 0x95c879, + 0x95c89e, 0x95cd89, 0x95d896, 0x95dfb8, 0x95e6ba, 0x95ea7d, 0x95ea87, + 0x95ead8, 0x95edb8, 0x95feb8, 0x96a25d, 0x96b235, 0x96b25e, 0x96b2e3, + 0x96ba25, 0x96db25, 0x96e3db, 0x97d5c9, 0x97dc92, 0x981635, 0x986235, + 0x986a25, 0x986ac2, 0x986e3a, 0x98c9e2, 0x9b8235, 0x9b8a25, 0x9c2879, + 0x9c29df, 0x9c2d89, 0x9e3ad8, 0x9e3ba8, 0x9e3d86, 0x9e3db8, 0x9ea2d8, + 0xa2417f, 0xa2517d, 0xa25187, 0xa251d8, 0xa25417, 0xa257d4, 0xa2d8f5, + 0xa2f517, 0xa31afe, 0xa31baf, 0xa31cad, 0xa31cba, 0xa31fca, 0xa5c417, + 0xa73cad, 0xa73cba, 0xa73fca, 0xa7a2df, 0xa7ba2f, 0xa7c243, 0xa7cad2, + 0xa7cba2, 0xa7d4bf, 0xa7d4cb, 0xa7d4fc, 0xa7df5c, 0xa7ed5c, 0xa7fca2, + 0xa7fe5c, 0xa874fc, 0xabf73a, 0xac2187, 0xac2417, 0xac2431, 0xac2d84, + 0xac6287, 0xad41fc, 0xad84fc, 0xad8f5c, 0xad8fc2, 0xb2318f, 0xb23198, + 0xb231f9, 0xb23518, 0xb238f5, 0xb2518e, 0xb2e318, 0xb2f9e3, 0xb3a518, + 0xb41abf, 0xb41acb, 0xb41bed, 0xb41cbe, 0xb41eab, 0xb84abf, 0xb84acb, + 0xb84eab, 0xb8a254, 0xb8abf2, 0xb8acb2, 0xb8b2fe, 0xb8cb2e, 0xb8df3a, + 0xb8eab2, 0xb8ed3a, 0xb8f5ac, 0xb8f5ce, 0xb8f5ea, 0xb8fe3a, 0xb985ea, + 0xba2198, 0xba2518, 0xba2541, 0xba2f95, 0xba6298, 0xbce84b, 0xbf51ea, + 0xbf95ea, 0xbf9e3a, 0xbf9ea2, 0xc2319d, 0xc24179, 0xc2419e, 0xc241e7, + 0xc24319, 0xc249e3, 0xc2d419, 0xc2e7d4, 0xc4b319, 0xc51acd, 0xc51bac, + 0xc51bce, 0xc51cdf, 0xc51dbc, 0xc793db, 0xc95bac, 0xc95bce, 0xc95dbc, + 0xc9ac2d, 0xc9b235, 0xc9bac2, 0xc9bce2, 0xc9c2ed, 0xc9dbc2, 0xc9df4b, + 0xc9e3ad, 0xc9e3ba, 0xc9e3db, 0xc9ed4b, 0xc9fe4b, 0xcad95c, 0xcb2179, + 0xcb2319, 0xcb2351, 0xcb2e73, 0xcb6279, 0xce31db, 0xce73db, 0xce7d4b, + 0xce7db2, 0xd4196b, 0xd45846, 0xd45b41, 0xd45b84, 0xd4b4f1, 0xd4bf95, + 0xd848f6, 0xd84986, 0xd84b98, 0xd84fc9, 0xd86c24, 0xd89b82, 0xdb2518, + 0xdb41cb, 0xdb84cb, 0xdb8bf2, 0xdb8cb2, 0xdb8f5c, 0xdf3a73, 0xe3186a, + 0xe34736, 0xe34a31, 0xe34a73, 0xe3a3d1, 0xe3ad84, 0xe737d6, 0xe73876, + 0xe73a87, 0xe73db8, 0xe76b23, 0xe78a72, 0xea2417, 0xea31ba, 0xea73ba, + 0xea7ad2, 0xea7ba2, 0xea7d4b, 0xed5c95, 0xf5176c, 0xf53956, 0xf53c51, + 0xf53c95, 0xf5c5e1, 0xf5ce73, 0xf95796, 0xf959e6, 0xf95c79, 0xf95ea7, + 0xf96a25, 0xf97c92, 0xfc2319, 0xfc51ac, 0xfc95ac, 0xfc9ac2, 0xfc9ce2, + 0xfc9e3a, 0xfe4b84, 0x1312813f, 0x13129813, 0x1312f913, 0x13191436, + 0x13196b23, 0x13198163, 0x131f9163, 0x1321fc23, 0x132c6813, 0x135813b8, + 0x1369193d, 0x138136cb, 0x138139b8, 0x13913dc9, 0x139143c9, 0x13cb6139, + 0x13f913c9, 0x14127914, 0x1412914e, 0x1412e714, 0x14171546, 0x14176c24, + 0x14179164, 0x141e7164, 0x1421ea24, 0x142a6914, 0x1467174f, 0x14714fa7, + 0x147154a7, 0x149146ac, 0x149147c9, 0x14ac6147, 0x14e714a7, 0x1512715d, + 0x15128715, 0x1512d815, 0x15181356, 0x15186a25, 0x15187165, 0x151d8165, + 0x1521db25, 0x152b6715, 0x1568185e, 0x157156ba, 0x157158a7, 0x15815eb8, + 0x15ba6158, 0x15d815b8, 0x1671f517, 0x1676fc67, 0x1681e318, 0x1686ea68, + 0x1691d419, 0x1696db69, 0x16a986a9, 0x16ac86a8, 0x16af96a9, 0x16b796b7, + 0x16ba96b9, 0x16be76b7, 0x16c876c8, 0x16cb76c7, 0x16cd86c8, 0x17151a7d, + 0x176bf676, 0x176c67d6, 0x17941a74, 0x1797a7ba, 0x179bd7bd, 0x17af5175, + 0x17ce7bce, 0x17cecf7e, 0x17e78ce7, 0x18131b8f, 0x186a68f6, 0x186ce686, + 0x18751b85, 0x1878b8cb, 0x187cf8cf, 0x18ad8cad, 0x18adae8d, 0x18be3183, + 0x18d89ad8, 0x19141c9e, 0x196ad696, 0x196b69e6, 0x19831c93, 0x1989c9ac, + 0x198ae9ae, 0x19bf9abf, 0x19bfbd9f, 0x19cd4194, 0x19f97bf9, 0x1a78ca78, + 0x1ab89ab8, 0x1ae9aeba, 0x1afe9aea, 0x1bc97bc9, 0x1bd7bdcb, 0x1bed7bdb, + 0x1cdf8cfc, 0x1cf8cfac, 0x1d412914, 
0x1d421c24, 0x1d7dbd7f, 0x1d7ecde7, + 0x1d8dad8f, 0x1dae9dea, 0x1e312813, 0x1e321b23, 0x1e9fbef9, 0x1ecf8efc, + 0x1f512715, 0x1f521a25, 0x2132b23f, 0x2132cb23, 0x2135b23b, 0x2142ac24, + 0x2142c24e, 0x2143c24c, 0x2152a25d, 0x2152ba25, 0x2154a25a, 0x21754175, + 0x21835183, 0x21943194, 0x231913d1, 0x232bf273, 0x232c2187, 0x232c273d, + 0x23537387, 0x2358d38d, 0x235b273b, 0x237b23cb, 0x237c243c, 0x237fc23c, + 0x239e389e, 0x239e9f3e, 0x23e349e3, 0x241714f1, 0x242a2198, 0x242a284f, + 0x242ce284, 0x24348498, 0x2439f49f, 0x243c284c, 0x247d497d, 0x247d7e4d, + 0x248a254a, 0x248ac24a, 0x248ea24a, 0x24d457d4, 0x251815e1, 0x252ad295, + 0x252b2179, 0x252b295e, 0x25459579, 0x2547e57e, 0x254a295a, 0x258f578f, + 0x258f8d5f, 0x259a25ba, 0x259b235b, 0x259db25b, 0x25f538f5, 0x27349734, + 0x27845784, 0x27912b23, 0x27e57e87, 0x27fe57e7, 0x28712c24, 0x28953895, + 0x28d38d98, 0x28ed38d8, 0x29812a25, 0x29df49f9, 0x29f49f79, 0x2a24f62a, + 0x2a25462a, 0x2a26f52a, 0x2a62f96a, 0x2af52a95, 0x2b23562b, 0x2b25e62b, + 0x2b26e32b, 0x2b62e76b, 0x2be32b73, 0x2c23d62c, 0x2c24362c, 0x2c26d42c, + 0x2c62d86c, 0x2cd42c84, 0x2d3d8d3f, 0x2d3e9de3, 0x2d4d7d4f, 0x2d7e5de7, + 0x2db2562b, 0x2db6296b, 0x2e5f8ef5, 0x2e9f4ef9, 0x2ea2462a, 0x2ea6286a, + 0x2fc2362c, 0x2fc6276c, 0x3138163f, 0x314a3914, 0x319121ba, 0x319a313d, + 0x321c232d, 0x3436b4cb, 0x34384986, 0x34384b98, 0x343b41cb, 0x343b84cb, + 0x3489a348, 0x34a3484f, 0x3516b676, 0x3518a313, 0x35373876, 0x35373a87, + 0x353a31ba, 0x353a73ba, 0x3548a348, 0x354a3595, 0x3562686a, 0x356a3bab, + 0x356bd3bd, 0x357387b8, 0x358ad38d, 0x358d38d6, 0x358d3b8d, 0x359a359d, + 0x35b238b2, 0x35bd73bd, 0x35d3bd31, 0x362bf232, 0x362cb232, 0x36bdb3ed, + 0x36ce3bce, 0x373879b8, 0x37387b8f, 0x37397dc9, 0x373987c9, 0x373e87b8, + 0x373f97c9, 0x3818fa31, 0x38d38df6, 0x38d3b8df, 0x38d3e8d6, 0x38d3eb8d, + 0x398623c2, 0x39e389e6, 0x39e38c9e, 0x39e39ed6, 0x39e39fe6, 0x39e3c9ed, + 0x39e3c9fe, 0x39e3feb8, 0x3a13f913, 0x3a348e34, 0x3a813981, 0x3a9e389e, + 0x3ad38d3f, 0x3ad9f49f, 0x3af53959, 0x3af9e39e, 0x3afe8f58, 0x3b236298, + 0x3b238b2f, 0x3b238cb2, 0x3b8fe3ce, 0x3bd31bdf, 0x3bd31ebd, 0x3bd73bdf, + 0x3bd73ebd, 0x3bdbed95, 0x3c23d9c2, 0x3c2439c2, 0x3c9e3bce, 0x3cb239c2, + 0x3ce31bce, 0x3ce31ced, 0x3ce31cfe, 0x3ce73bce, 0x3ce73ced, 0x3ce73cfe, + 0x3d36bdf3, 0x3dbf9e3e, 0x3e346ce3, 0x3e349e36, 0x3e34ce31, 0x3e34ce73, + 0x3e73ce87, 0x3ecf8d3e, 0x3ed6ce3e, 0x3fc239c2, 0x4149164e, 0x415b4715, + 0x417121cb, 0x417b414f, 0x421a242f, 0x4316c686, 0x4319b414, 0x4359b459, + 0x435b4373, 0x4362696b, 0x436cf4cf, 0x437b437f, 0x438498c9, 0x439bf49f, + 0x439f49f6, 0x439f4c9f, 0x43cf84cf, 0x43f4cf41, 0x4546c5ac, 0x45495796, + 0x45495c79, 0x454c51ac, 0x454c95ac, 0x4597b459, 0x45b4595e, 0x462ac242, + 0x462ce242, 0x46ad4cad, 0x46cfc4df, 0x479624a2, 0x47d47df6, 0x47d47ed6, + 0x47d497d6, 0x47d49a7d, 0x47d4a7df, 0x47d4a7ed, 0x47d4edc9, 0x48478fa7, + 0x484798a7, 0x484987c9, 0x48498c9e, 0x484d98c9, 0x484e78a7, 0x4919eb41, + 0x49f49fe6, 0x49f4c9fe, 0x49f4d9f6, 0x49f4dc9f, 0x4a24f7a2, 0x4a2547a2, + 0x4a7d4cad, 0x4ac247a2, 0x4ad41adf, 0x4ad41aed, 0x4ad41cad, 0x4ad84adf, + 0x4ad84aed, 0x4ad84cad, 0x4b14e714, 0x4b459d45, 0x4b7d497d, 0x4b914791, + 0x4be34737, 0x4be7d47d, 0x4bed9e39, 0x4bf49f4e, 0x4bf7e57e, 0x4c246279, + 0x4c249ac2, 0x4c249c2e, 0x4c9ed4ad, 0x4cf41cfe, 0x4cf41dcf, 0x4cf84cfe, + 0x4cf84dcf, 0x4cfcdf73, 0x4d456ad4, 0x4d457d46, 0x4d45ad41, 0x4d45ad84, + 0x4d84ad98, 0x4dae9f4d, 0x4df6ad4d, 0x4ea247a2, 0x4f46cfe4, 0x4fce7d4d, + 0x513c5813, 0x5157165d, 0x518121ac, 0x518c515e, 0x521b252e, 0x5378c537, + 0x53c5373d, 0x5416a696, 0x5417c515, 
0x5437c537, 0x543c5484, 0x5462676c, + 0x546ae5ae, 0x547ce57e, 0x547e57e6, 0x547e5a7e, 0x548c548e, 0x549579a7, + 0x54ae95ae, 0x54e5ae51, 0x562ad252, 0x562ba252, 0x56aea5fe, 0x56bf5abf, + 0x5717dc51, 0x57e57ed6, 0x57e5a7ed, 0x57e5f7e6, 0x57e5fa7e, 0x587625b2, + 0x58f578f6, 0x58f57b8f, 0x58f58df6, 0x58f58fe6, 0x58f5b8df, 0x58f5b8fe, + 0x58f5dfa7, 0x595798a7, 0x59579a7d, 0x595879b8, 0x59589eb8, 0x595d89b8, + 0x595f79a7, 0x5a256287, 0x5a257a2d, 0x5a257ba2, 0x5a7df5bf, 0x5ae51aed, + 0x5ae51fae, 0x5ae95aed, 0x5ae95fae, 0x5aeafe84, 0x5b25e8b2, 0x5b8f5abf, + 0x5ba258b2, 0x5bf51abf, 0x5bf51bdf, 0x5bf51bfe, 0x5bf95abf, 0x5bf95bdf, + 0x5bf95bfe, 0x5c15d815, 0x5c537f53, 0x5c715871, 0x5c8f578f, 0x5cd45848, + 0x5cd8f58f, 0x5cdf7d47, 0x5ce57e5d, 0x5ce8d38d, 0x5db258b2, 0x5e56aed5, + 0x5ead8f5f, 0x5f536bf5, 0x5f538f56, 0x5f53bf51, 0x5f53bf95, 0x5f95bf79, + 0x5fbd7e5f, 0x5fe6bf5f, 0x62a686af, 0x62a6986a, 0x62ac86a8, 0x62b6796b, + 0x62b696be, 0x62ba96b9, 0x62c676cd, 0x62c6876c, 0x62cb76c7, 0x63fec3e3, + 0x64eda4d4, 0x65dfb5f5, 0x679b674b, 0x67b67254, 0x67b67c4b, 0x67c67d5c, + 0x67c6875c, 0x67fc675c, 0x68a68f3a, 0x68a6983a, 0x68c68235, 0x68c68a5c, + 0x68ea683a, 0x69a69243, 0x69a69b3a, 0x69b69e4b, 0x69db694b, 0x6a34ca34, + 0x6a3696ad, 0x6a3c86a8, 0x6a3f96a9, 0x6aeab5ea, 0x6b45ab45, 0x6b4676bf, + 0x6b4a96b9, 0x6b4e76b7, 0x6bdbc3db, 0x6c53bc53, 0x6c5686ce, 0x6c5b76c7, + 0x6c5d86c8, 0x6cfca4fc, 0x73497346, 0x734a9734, 0x7367b67f, 0x7367c67d, + 0x7367fc67, 0x7387b8cb, 0x73967b67, 0x73a97aba, 0x73b67cb6, 0x73bdb97d, + 0x73c687c6, 0x73dcf8cf, 0x73febf9b, 0x767b627f, 0x76c61643, 0x7845b784, + 0x78748654, 0x787b84cb, 0x787b8cb2, 0x797a7ba2, 0x79a7ba4b, 0x79bd7bd2, + 0x7a7ba4bf, 0x7a7bac4b, 0x7a7cad5c, 0x7a7cba5c, 0x7a7eba4b, 0x7a7fca5c, + 0x7bd74bdf, 0x7bd7bdf2, 0x7bd7ebd2, 0x7cb21571, 0x7ce75bce, 0x7ce75cfe, + 0x7ce7bce2, 0x7ce7ced2, 0x7ce7cfe2, 0x7ce7fe4b, 0x7e57e876, 0x7e5a7eba, + 0x7e785ea7, 0x7e78ce72, 0x7f517c51, 0x8468a68f, 0x8468c68e, 0x8468ea68, + 0x84768c68, 0x8498c9ac, 0x84a698a6, 0x84c68ac6, 0x84cfc78f, 0x84edce7c, + 0x84fae9ae, 0x868c628e, 0x86a61654, 0x87b8cb5c, 0x87cf8cf2, 0x8953c895, + 0x89859635, 0x898c95ac, 0x898c9ac2, 0x8ac21381, 0x8ad83aed, 0x8ad83cad, + 0x8ad8adf2, 0x8ad8aed2, 0x8ad8cad2, 0x8ad8ed5c, 0x8b8abf3a, 0x8b8acb3a, + 0x8b8cb5ce, 0x8b8cba5c, 0x8b8dcb5c, 0x8b8eab3a, 0x8cf85cfe, 0x8cf8cfe2, + 0x8cf8dcf2, 0x8d38d986, 0x8d3b8dcb, 0x8d893db8, 0x8d89ad82, 0x8e318a31, + 0x9569a69d, 0x9569b69e, 0x9569db69, 0x9579a7ba, 0x95869a69, 0x95a69ba6, + 0x95aea89e, 0x95b679b6, 0x95dfad8a, 0x95ebd7bd, 0x969a629d, 0x96b61635, + 0x98ae9ae2, 0x98c9ac3a, 0x9ae93aed, 0x9ae9aed2, 0x9ae9fae2, 0x9ba21491, + 0x9bf94abf, 0x9bf94bdf, 0x9bf9abf2, 0x9bf9bdf2, 0x9bf9bfe2, 0x9bf9df3a, + 0x9c9ac3ad, 0x9c9acb3a, 0x9c9bac4b, 0x9c9bce4b, 0x9c9dbc4b, 0x9c9fac3a, + 0x9d419b41, 0x9f49f796, 0x9f4c9fac, 0x9f974fc9, 0x9f97bf92, 0xa314ca34, + 0xa348ca34, 0xa72af52a, 0xa734ca34, 0xa73ca787, 0xa78ca782, 0xa7ba4b54, + 0xa7fe4fc4, 0xab45ab41, 0xab45ab84, 0xab894ab8, 0xaba8b298, 0xae349ae3, + 0xae9aeba2, 0xaeab5ea1, 0xaeab9e3a, 0xaf96a596, 0xb459ab45, 0xb82be32b, + 0xb8cb5c35, 0xb8ed5ea5, 0xbc53bc51, 0xbc53bc95, 0xbc975bc9, 0xbcb9c279, + 0xbd457bd4, 0xbd7bdcb2, 0xbdbc3db1, 0xbdbc7d4b, 0xbe76b376, 0xc537bc53, + 0xc92cd42c, 0xc9ac3a43, 0xc9df3db3, 0xcd86c486, 0xcf538cf5, 0xcf8cfac2, + 0xcfca4fc1, 0xcfca8f5c, 0xd38d3fc9, 0xd3a8d398, 0xd3bd73cb, 0xd457d4a7, + 0xd4adaf95, 0xd7bd7f5c, 0xe349e3c9, 0xe3cecd84, 0xe57e5db8, 0xe5ae95ba, + 0xe5c7e587, 0xe9ae9d4b, 0xf49f4ea7, 0xf4b9f479, 0xf4cf84ac, 0xf538f5b8, + 0xf5bfbe73, 0xf8cf8e3a, 0x13161b613f, 
0x13161c613d, 0x13161c6143, + 0x13161fc613, 0x131813b8cb, 0x13218c2138, 0x135161b613, 0x13813cfc8f, + 0x139f913bf9, 0x13bc913bc9, 0x14161a614f, 0x14161a6154, 0x14161c614e, + 0x14161ea614, 0x141914c9ac, 0x14219a2149, 0x147e714ce7, 0x14914aea9e, + 0x14a71ca714, 0x15161a615d, 0x15161b615e, 0x15161db615, 0x151715a7ba, + 0x15217b2157, 0x15715bdb7d, 0x158d815ad8, 0x15ab815ab8, 0x16a61f516a, + 0x16b61e316b, 0x16c61d416c, 0x17bfe7bfeb, 0x17cfdfc7df, 0x18aefea8fe, + 0x18ced8cedc, 0x19adf9adfa, 0x19bdedb9ed, 0x21712fc217, 0x21812ea218, + 0x21912db219, 0x21a21812af, 0x21a21912ad, 0x21a21f912a, 0x21b21712bf, + 0x21b21912be, 0x21b21e712b, 0x21c21712cd, 0x21c21812ce, 0x21c21d812c, + 0x232c237387, 0x232c28d38d, 0x238fe38fe8, 0x239fdf93df, 0x23e32b29e3, + 0x242a248498, 0x242a29f49f, 0x247efe74fe, 0x249ed49ed9, 0x24d42c27d4, + 0x252b259579, 0x252b27e57e, 0x257df57df7, 0x258ded85ed, 0x25f52a28f5, + 0x2a212912ba, 0x2a624962a4, 0x2a8952a895, 0x2ac212812a, 0x2b212712cb, + 0x2b625762b5, 0x2b7329732b, 0x2c623862c3, 0x2c7842c784, 0x3132b9123b, + 0x3191d3bd31, 0x326239623d, 0x326286232f, 0x3262962432, 0x3262f96232, + 0x32b238b298, 0x3436b496b9, 0x3437b437cb, 0x3438468c68, 0x3439cb49cb, + 0x34a349f49f, 0x3526286232, 0x3537367b67, 0x353a3595ba, 0x353a68a63a, + 0x353ba38ba3, 0x359d3bd359, 0x36ce3686ce, 0x37349734c9, 0x37387cf8cf, + 0x3739f97bf9, 0x373bc97bc9, 0x3816c31681, 0x389c2389c2, 0x38bce38bce, + 0x38dfc3df8d, 0x38fe38fe86, 0x39e369b69e, 0x39e389eb8b, 0x39efb3e9f9, + 0x39fd3fd9f6, 0x3a13913aba, 0x3a35f538f5, 0x3a39538953, 0x3aefe8fe3e, + 0x3b239fb29f, 0x3b26932b6b, 0x3bdbed39e3, 0x3bfe3bfe1b, 0x3cecde8d3e, + 0x3cfc238fc2, 0x3d36cfd3cf, 0x3d38d398c9, 0x3d3cfd31cf, 0x3e3181ce31, + 0x3e348ce348, 0x3e6bfbef3e, 0x3efebfe73e, 0x4142c7124c, 0x4171f4cf41, + 0x426247624f, 0x4262762542, 0x426296242e, 0x4262e76242, 0x42c249c279, + 0x437f4cf437, 0x4546c576c7, 0x4547ac57ac, 0x4548c548ac, 0x4549569a69, + 0x45b457e57e, 0x46ad4696ad, 0x479a7a24a2, 0x47d467c67d, 0x47d497dc9c, + 0x47dec4d7e7, 0x47ef4ef7e6, 0x48457845a7, 0x4847e78ce7, 0x48498ae9ae, + 0x484a78ca78, 0x4916a41691, 0x49cad49cad, 0x49ed49ed96, 0x49fea4fe9f, + 0x4adafd9f4d, 0x4aea249ea2, 0x4b14714bcb, 0x4b43e349e3, 0x4b47349734, + 0x4bded9ed4d, 0x4c247ec27e, 0x4c26742c6c, 0x4ced4ced1c, 0x4cfcdf47d4, + 0x4d4191ad41, 0x4d459ad459, 0x4d6cecde4d, 0x4dedced84d, 0x4f46aef4ae, + 0x4f49f479a7, 0x4f4aef41ae, 0x5152a8125a, 0x5181e5ae51, 0x526258625e, + 0x526276252d, 0x5262d86252, 0x52a257a287, 0x53c538d38d, 0x548e5ae548, + 0x56bf5676bf, 0x5716b51671, 0x578b2578b2, 0x57abf57abf, 0x57df57df76, + 0x57edb5ed7e, 0x58de5de8d6, 0x58f568a68f, 0x58f578fa7a, 0x58fda5f8d8, + 0x59538953b8, 0x59579bd7bd, 0x5958d89ad8, 0x595ab89ab8, 0x5a258da28d, + 0x5a26852a6a, 0x5adf5adf1a, 0x5aeafe58f5, 0x5bdb257db2, 0x5bfbef7e5f, + 0x5c15815cac, 0x5c54d457d4, 0x5c58457845, 0x5cfdf7df5f, 0x5df56adf5a, + 0x5e56bde5bd, 0x5e57e587b8, 0x5e5bde51bd, 0x5f5171bf51, 0x5f537bf537, + 0x5fdfadf95f, 0x62762f5267, 0x62862e3268, 0x62962d4269, 0x6762562687, + 0x676b456b47, 0x6796246267, 0x67cfc674fc, 0x6862362698, 0x686c536c58, + 0x68aea685ea, 0x696a346a39, 0x69bdb693db, 0x7121c21871, 0x73467c6734, + 0x76714c6174, 0x79121b2171, 0x7a78ca785c, 0x7a7cfca4fc, 0x7bc517bc51, + 0x7bdf57dfbd, 0x7bfe7bfeb2, 0x7ce7bce4b4, 0x7cef47ecfc, 0x7cfd7fdcf2, + 0x7d7bd7c5bc, 0x7e57e6b676, 0x7efe4fea7e, 0x8121a21981, 0x84568a6845, + 0x86815a6185, 0x8a318ca318, 0x8ad8cad5c5, 0x8ade58daea, 0x8aef8efae2, + 0x8b89ab893a, 0x8b8aeab5ea, 0x8ced8cedc2, 0x8cfe38fecf, 0x8d38d6c686, + 0x8ded5edb8d, 0x8f8cf8a3ca, 0x95369b6953, 0x96913b6193, 
0x9ab419ab41, + 0x9adf9adfa2, 0x9aed49edae, 0x9bde9debd2, 0x9bf9abf3a3, 0x9bfd39fbdb, + 0x9c97bc974b, 0x9c9bdbc3db, 0x9e9ae9b4ab, 0x9f49f6a696, 0x9fdf3dfc9f, + 0xa616516ba6, 0xac616416a6, 0xb616316cb6, 0xd3adf9dfd3, 0xd3dfcdfd73, + 0xd7df5dfda7, 0xe5ced8ede5, 0xe5edbede95, 0xe9ed4edec9, 0xf4bfe7fef4, + 0xf4feafef84, 0xf8fe3fefb8, + }; + + map[42] = { + 0x16af, 0x176f, 0x1a7f, 0x316f, 0x321f, 0x362f, 0x62af, 0x6a3f, 0xa31f, + 0xa73f, 0xf21a, 0xf736, 0x14546a, 0x145476, 0x1454a7, 0x16abcb, + 0x16bcdb, 0x16cbec, 0x16dbdf, 0x16dbed, 0x16ecde, 0x16ecef, 0x176898, + 0x176ded, 0x1789b8, 0x1798c9, 0x17b8bf, 0x17b8cb, 0x17bcb6, 0x17c9bc, + 0x17c9cf, 0x186cad, 0x189ad8, 0x189b8a, 0x189d86, 0x189db8, 0x18dfc9, + 0x196bae, 0x198ae9, 0x198c9a, 0x198e96, 0x198ec9, 0x19efb8, 0x1a7898, + 0x1a7bcb, 0x1a7ded, 0x1ad8ed, 0x1ae9de, 0x1b8baf, 0x1b8cba, 0x1b8dbf, + 0x1b8dcb, 0x1b8fec, 0x1bae78, 0x1bd7cb, 0x1c9bca, 0x1c9caf, 0x1c9ebc, + 0x1c9ecf, 0x1c9fdb, 0x1cad79, 0x1ce7bc, 0x1d86df, 0x1d86ed, 0x1db8ed, + 0x1dbd7f, 0x1dbf9e, 0x1debd7, 0x1e96de, 0x1e96ef, 0x1ec9de, 0x1ece7f, + 0x1ecf8d, 0x1edce7, 0x1f8ad8, 0x1f9ae9, 0x21bcdb, 0x21bdbf, 0x21cbec, + 0x21cecf, 0x21dbed, 0x21deda, 0x21ecde, 0x23723f, 0x26726f, 0x2a4584, + 0x2a484f, 0x2a5495, 0x2a595f, 0x2a8498, 0x2a8981, 0x2a9589, 0x316898, + 0x316ded, 0x3189b8, 0x3198c9, 0x31b8bf, 0x31b8cb, 0x31c9bc, 0x31c9cf, + 0x321454, 0x321bcb, 0x321ded, 0x324584, 0x325495, 0x32848f, 0x328498, + 0x328981, 0x329589, 0x32959f, 0x34196b, 0x3456b4, 0x345846, 0x345b41, + 0x345b84, 0x34bf95, 0x35186c, 0x3546c5, 0x354956, 0x354c51, 0x354c95, + 0x35cf84, 0x362454, 0x362898, 0x362bcb, 0x36b4cb, 0x36c5bc, 0x38486f, + 0x384986, 0x384b8f, 0x384b98, 0x384fc9, 0x386c24, 0x38b298, 0x395896, + 0x39596f, 0x395c89, 0x395c9f, 0x395fb8, 0x396b25, 0x39c289, 0x3b41bf, + 0x3b41cb, 0x3b84cb, 0x3b8b2f, 0x3b8f5c, 0x3bc8b2, 0x3c51bc, 0x3c51cf, + 0x3c95bc, 0x3c9c2f, 0x3c9f4b, 0x3cb9c2, 0x3ded62, 0x3f46b4, 0x3f56c5, + 0x416cad, 0x4196ad, 0x4196ba, 0x4196db, 0x41976d, 0x419a7d, 0x41c9ad, + 0x41dfc9, 0x421cad, 0x4542a1, 0x454316, 0x45462a, 0x4546a3, 0x454736, + 0x456ad4, 0x456b4a, 0x457d46, 0x45846a, 0x4584a7, 0x45a7d4, 0x45ad41, + 0x45ad84, 0x45b41a, 0x45b84a, 0x45d416, 0x45d421, 0x45d6b4, 0x45d846, + 0x45db41, 0x45db84, 0x462cad, 0x46ad4f, 0x46b4fa, 0x47d4f6, 0x487654, + 0x4a7d4f, 0x4ad41f, 0x4ad84f, 0x4adf95, 0x4b41fa, 0x4baf95, 0x4d416f, + 0x4d421f, 0x4d4f62, 0x4d5462, 0x4d6b4f, 0x4db84f, 0x4dbf95, 0x4dcb95, + 0x4df6c5, 0x4df956, 0x4dfc51, 0x4dfc95, 0x516bae, 0x5186ae, 0x5186ca, + 0x5186ec, 0x51876e, 0x518a7e, 0x51b8ae, 0x51efb8, 0x521bae, 0x546ae5, + 0x546c5a, 0x547e56, 0x54956a, 0x5495a7, 0x54a7e5, 0x54ae51, 0x54ae95, + 0x54c51a, 0x54c95a, 0x54e516, 0x54e521, 0x54e6c5, 0x54e956, 0x54ec51, + 0x54ec95, 0x562bae, 0x56ae5f, 0x56c5fa, 0x57e5f6, 0x597645, 0x5a7e5f, + 0x5ae51f, 0x5ae95f, 0x5aef84, 0x5c51fa, 0x5caf84, 0x5e4562, 0x5e516f, + 0x5e521f, 0x5e5f62, 0x5e6c5f, 0x5ebc84, 0x5ec95f, 0x5ecf84, 0x5ef6b4, + 0x5ef846, 0x5efb41, 0x5efb84, 0x62a898, 0x62abcb, 0x62bcdb, 0x62cbec, + 0x62dbdf, 0x62dbed, 0x62deda, 0x62ecde, 0x62ecef, 0x6898a3, 0x6a3ded, + 0x6ad4ed, 0x6ae5de, 0x6b4cba, 0x6b4dcb, 0x6b4fec, 0x6bae34, 0x6bcdb3, + 0x6c5bca, 0x6c5ebc, 0x6c5fdb, 0x6cad35, 0x6cbec3, 0x6d352b, 0x6d42ed, + 0x6db4ed, 0x6dbd3f, 0x6dbed3, 0x6dbf5e, 0x6e342c, 0x6e52de, 0x6ec5de, + 0x6ecde3, 0x6ece3f, 0x6ecf4d, 0x71271f, 0x7389b8, 0x738b8f, 0x7398c9, + 0x739c9f, 0x73b8cb, 0x73bcb6, 0x73c9bc, 0x76de4d, 0x76ed5e, 0x7a27af, + 0x846cad, 0x8486af, 0x84876f, 0x848fa7, 0x8498a7, 0x84ba98, 0x84d986, + 0x84db98, 0x84dfc9, 0x84edc9, 
0x84fae9, 0x84fc9a, 0x84fe6c, 0x84fe96, + 0x84fec9, 0x86ae34, 0x86c24a, 0x86ca34, 0x86cad3, 0x86ec34, 0x876e34, + 0x89486a, 0x894876, 0x894ad8, 0x89816a, 0x898736, 0x898a31, 0x898a73, + 0x89a348, 0x89ad83, 0x89b8a3, 0x89d863, 0x89db83, 0x8a348f, 0x8a7e34, + 0x8ad8f3, 0x8b2a98, 0x8d3fc9, 0x8d86f3, 0x956bae, 0x9589a7, 0x9596af, + 0x95976f, 0x959fa7, 0x95ca89, 0x95deb8, 0x95e896, 0x95ec89, 0x95efb8, + 0x95fad8, 0x95fb8a, 0x95fd6b, 0x95fd86, 0x95fdb8, 0x96ad35, 0x96b25a, + 0x96ba35, 0x96bae3, 0x96db35, 0x976d35, 0x98596a, 0x985976, 0x985ae9, + 0x98a359, 0x98ae93, 0x98c9a3, 0x98e963, 0x98ec93, 0x9a359f, 0x9a7d35, + 0x9ae9f3, 0x9c2a89, 0x9e3fb8, 0x9e96f3, 0xa31454, 0xa31bcb, 0xa34584, + 0xa35495, 0xa73bcb, 0xa73ded, 0xa74543, 0xa7d4ed, 0xa7e5de, 0xad3518, + 0xad41ed, 0xad84ed, 0xad8ed3, 0xad8f5e, 0xae3419, 0xae51de, 0xae95de, + 0xae9de3, 0xae9f4d, 0xb2a518, 0xb32518, 0xb41dcb, 0xb41fec, 0xb84cba, + 0xb84dcb, 0xb84fec, 0xb8ae34, 0xb8b2af, 0xb8ba3f, 0xb8d3cb, 0xb8f5ae, + 0xb8f5ca, 0xb8f5ec, 0xb8fe3c, 0xba3518, 0xbae318, 0xbae341, 0xbae738, + 0xbc4ba1, 0xbc8b2a, 0xbc8ba3, 0xbcb21a, 0xbcb316, 0xbcb6a3, 0xbcdb31, + 0xbd73cb, 0xc2a419, 0xc32419, 0xc51ebc, 0xc51fdb, 0xc95bca, 0xc95ebc, + 0xc95fdb, 0xc9ad35, 0xc9c2af, 0xc9ca3f, 0xc9e3bc, 0xc9f4ad, 0xc9f4ba, + 0xc9f4db, 0xc9fd3b, 0xca3419, 0xcad319, 0xcad351, 0xcad739, 0xcb5ca1, + 0xcb9c2a, 0xcb9ca3, 0xcbec31, 0xce73bc, 0xd1796b, 0xd3196b, 0xd3516b, + 0xd35186, 0xd351b8, 0xd3521b, 0xd3bf95, 0xd416ed, 0xd421ed, 0xd7396b, + 0xd864ed, 0xd86f5e, 0xdb41ed, 0xdb84ed, 0xdb8ed3, 0xdb8f5e, 0xdbd31f, + 0xdbd73f, 0xdbf51e, 0xdbf95e, 0xdbf9e3, 0xde8d36, 0xdebd31, 0xdebd73, + 0xded16a, 0xded763, 0xdeda31, 0xe1786c, 0xe3186c, 0xe3416c, 0xe34196, + 0xe341c9, 0xe3421c, 0xe3cf84, 0xe516de, 0xe521de, 0xe7386c, 0xe965de, + 0xe96f4d, 0xec51de, 0xec95de, 0xec9de3, 0xec9f4d, 0xece31f, 0xece73f, + 0xecf41d, 0xecf84d, 0xecf8d3, 0xed9e36, 0xedce31, 0xedce73, 0xf4ba84, + 0xf4d846, 0xf5ca95, 0xf5e956, 0xf8d3b8, 0xf9e3c9, 0xfb41db, 0xfc51ec, + 0x12712898, 0x12712ded, 0x1289d812, 0x128d812f, 0x1298e912, 0x129e912f, + 0x12bcb712, 0x12d812ed, 0x12e912de, 0x14139146, 0x1416ca34, 0x141914a7, + 0x1419a314, 0x142ae714, 0x142c7124, 0x14712914, 0x14914ae9, 0x14914c9a, + 0x14914e96, 0x14e7146c, 0x15138156, 0x1516ba35, 0x151815a7, 0x1518a315, + 0x152ad715, 0x152b7125, 0x15712815, 0x15815ad8, 0x15815b8a, 0x15815d86, + 0x15d7156b, 0x1686c6a8, 0x168c68ec, 0x1696b6a9, 0x169b69db, 0x16abeabe, + 0x16acdacd, 0x16bfbefb, 0x16cfcdfc, 0x17145b41, 0x17154c51, 0x171b41cb, + 0x171c51bc, 0x17419164, 0x17518165, 0x176b96b9, 0x176c86c8, 0x178151b8, + 0x1787e7b8, 0x178f8cf8, 0x179141c9, 0x1797d7c9, 0x179f9bf9, 0x17d7976d, + 0x17e7876e, 0x186c6d86, 0x189cd89c, 0x18b8cbec, 0x18f8aef8, 0x18f8cf8a, + 0x18f8ef86, 0x196b6e96, 0x198be98b, 0x19c9bcdb, 0x19f9adf9, 0x19f9bf9a, + 0x19f9df96, 0x1a7dacad, 0x1a7eabae, 0x1ad8acad, 0x1ae9abae, 0x1b8abeab, + 0x1bde9bde, 0x1bef9ebf, 0x1bf9bfdb, 0x1bfe7bfb, 0x1c9acdac, 0x1cdf8dcf, + 0x1ced8ced, 0x1cf8cfec, 0x1cfd7cfc, 0x1d797bd7, 0x1d797da7, 0x1e787ce7, + 0x1e787ea7, 0x1fdfc9fd, 0x1fefb8fe, 0x2142c24a, 0x214c24ec, 0x2152b25a, + 0x215b25db, 0x21bfbefb, 0x21cfcdfc, 0x21dacada, 0x21eabaea, 0x23289873, + 0x2328d398, 0x2329e389, 0x232d8d3f, 0x232de8d3, 0x232e9e3f, 0x232ed9e3, + 0x237bcb23, 0x242c2a84, 0x243e2384, 0x24546276, 0x248c24ec, 0x252b2a95, + 0x253d2395, 0x259b25db, 0x26289d86, 0x2628d86f, 0x26298e96, 0x2629e96f, + 0x262d86ed, 0x262ded76, 0x262e96de, 0x28986276, 0x28b2db8f, 0x28b2db98, + 0x29c2ec89, 0x29c2ec9f, 0x2a4f49f4, 0x2a5f58f5, 0x2a815181, 0x2a914191, + 0x2b6926db, 
0x2c6826ec, 0x2d4284df, 0x2d4284ed, 0x2ded3273, 0x2e5295de, + 0x2e5295ef, 0x314914c9, 0x315815b8, 0x3168c68c, 0x3169b69b, 0x318f8cf8, + 0x319f9bf9, 0x32185185, 0x32194194, 0x3242c284, 0x324f49f4, 0x3252b295, + 0x325f58f5, 0x32737454, 0x32b2521b, 0x32c2421c, 0x32d3531d, 0x32d3573d, + 0x32d7397d, 0x32e3431e, 0x32e3473e, 0x32e7387e, 0x34191b41, 0x343e36b4, + 0x343e3846, 0x343e3b41, 0x3459b459, 0x345b4373, 0x348498c9, 0x34f46cf4, + 0x34f49f46, 0x34f4cf41, 0x35181c51, 0x353d36c5, 0x353d3956, 0x353d3c51, + 0x3548c548, 0x354c5373, 0x359589b8, 0x35f56bf5, 0x35f58f56, 0x35f5bf51, + 0x36243e34, 0x36253d35, 0x362b696b, 0x362c686c, 0x36b4696b, 0x36c5686c, + 0x374b437f, 0x375c537f, 0x37b437cb, 0x37c537bc, 0x38468c68, 0x38bc58bc, + 0x38cf5c8f, 0x38f58fb8, 0x38fc28f8, 0x39569b69, 0x39bf4b9f, 0x39cb49cb, + 0x39f49fc9, 0x39fb29f9, 0x3b23e318, 0x3b2528b2, 0x3b252b62, 0x3c23d319, + 0x3c2429c2, 0x3c242c62, 0x3d3196ad, 0x3d356a3d, 0x3d35a31d, 0x3d76c23d, + 0x3e3186ae, 0x3e346a3e, 0x3e34a31e, 0x3e76b23e, 0x3fbf95fb, 0x3fcf84fc, + 0x416c67d6, 0x4191ad41, 0x4191b41a, 0x4191d421, 0x419db414, 0x4237c243, + 0x428478fc, 0x42c2d421, 0x42cd4284, 0x42d42584, 0x434b8e34, 0x43796243, + 0x43e13436, 0x45471271, 0x4547a27a, 0x4578b784, 0x457ab47a, 0x459b459a, + 0x459d4259, 0x459d4596, 0x459d45c9, 0x459db459, 0x45ad4595, 0x45b6b476, + 0x45cd451c, 0x45db7d47, 0x462696ad, 0x462c242a, 0x46b4f676, 0x46bce4bc, + 0x473e3436, 0x4787b84f, 0x48478b98, 0x484798c9, 0x48498ae9, 0x48498c9a, + 0x48498e96, 0x48498ec9, 0x484bc78b, 0x48795248, 0x4914916a, 0x49f49f76, + 0x49f4e9f6, 0x49f4ec9f, 0x49fca49f, 0x4a3e3473, 0x4ae349e3, 0x4b41cbec, + 0x4b84cbec, 0x4c249c2a, 0x4ca34739, 0x4d4546c5, 0x4d62c242, 0x4d8498c9, + 0x4dc249c2, 0x4dcfc84f, 0x4df49f46, 0x4e346ce3, 0x4e349e36, 0x4e34ce31, + 0x4e34ce73, 0x4ef421ef, 0x4ef84ef6, 0x4efb41ef, 0x4efb84ef, 0x4efe6b4f, + 0x4f46aef4, 0x4f46cf4a, 0x4f47ef46, 0x4f49f46a, 0x4f49f4a7, 0x4f4aef41, + 0x4f4aef84, 0x4f4cf41a, 0x4f4ef416, 0x4f9c279c, 0x4fe4f462, 0x4fecf84f, + 0x516b67e6, 0x5181ae51, 0x5181c51a, 0x5181e521, 0x518ec515, 0x5237b253, + 0x529579fb, 0x52b2e521, 0x52be5295, 0x52e52495, 0x535c9d35, 0x53786253, + 0x53d13536, 0x5479c795, 0x547ac57a, 0x548c548a, 0x548e5248, 0x548e5486, + 0x548e54b8, 0x548ec548, 0x54ae5484, 0x54be541b, 0x54c6c576, 0x54ec7e57, + 0x562686ae, 0x562b252a, 0x56c5f676, 0x56cbd5cb, 0x573d3536, 0x5797c95f, + 0x5815816a, 0x58f58f76, 0x58f5d8f6, 0x58f5db8f, 0x58fba58f, 0x595789b8, + 0x59579c89, 0x59589ad8, 0x59589b8a, 0x59589d86, 0x59589db8, 0x595cb79c, + 0x5a3d3573, 0x5ad358d3, 0x5b258b2a, 0x5ba35738, 0x5c51bcdb, 0x5c95bcdb, + 0x5d356bd3, 0x5d358d36, 0x5d35bd31, 0x5d35bd73, 0x5df521df, 0x5df95df6, + 0x5dfc51df, 0x5dfc95df, 0x5dfd6c5f, 0x5e456b4b, 0x5e62b252, 0x5e9589b8, + 0x5eb258b2, 0x5ebfb95f, 0x5ef58f56, 0x5f56adf5, 0x5f56bf5a, 0x5f57df56, + 0x5f58f56a, 0x5f58f5a7, 0x5f5adf51, 0x5f5adf95, 0x5f5bf51a, 0x5f5df516, + 0x5f8b278b, 0x5fd5f562, 0x5fdbf95f, 0x62767bcb, 0x6286c6a8, 0x6286c768, + 0x62876e78, 0x6296b6a9, 0x6296b769, 0x62976d79, 0x62adcadc, 0x62aebaeb, + 0x62b252db, 0x62bfbefb, 0x62c242ec, 0x62cfcdfc, 0x67bc4b67, 0x67cb5c67, + 0x68c685ca, 0x68c68ec3, 0x69b694ba, 0x69b69db3, 0x6abaea3b, 0x6abeab5e, + 0x6acada3c, 0x6acdac4d, 0x6b4abeab, 0x6bdbed5e, 0x6bfbefb3, 0x6c5acdac, + 0x6cecde4d, 0x6cfcdfc3, 0x6dfcdf4d, 0x6efbef5e, 0x6f4fecf4, 0x6f5fdbf5, + 0x7174b41f, 0x7175c51f, 0x738f8cf8, 0x739f9bf9, 0x73b696b6, 0x73c686c6, + 0x73d7976d, 0x73d97dc9, 0x73e7876e, 0x73e87eb8, 0x768e785e, 0x769d794d, + 0x7898a72a, 0x78be785e, 0x79cd794d, 0x7a7b4baf, 0x7a7bc4ba, 0x7a7c5caf, + 0x7a7cb5ca, 
0x7bcba27a, 0x7bd47dbf, 0x7bd74dcb, 0x7ce57ecf, 0x7ce75ebc, + 0x7d4a7cad, 0x7e5a7bae, 0x812ca781, 0x818db518, 0x8467c687, 0x8478be78, + 0x84a7e787, 0x84d68c68, 0x84e78ce7, 0x84e78e76, 0x84f8cf8a, 0x8518e516, + 0x85f5ad8f, 0x86aea24a, 0x86c6d863, 0x8712e781, 0x87b82bfe, 0x894d2482, + 0x89cd89c3, 0x89dad82a, 0x8a7aca34, 0x8a7e7873, 0x8ad8fa2a, 0x8ade58de, + 0x8b28bcdb, 0x8b28dbed, 0x8b2c978b, 0x8b2ce8bc, 0x8b8cb5ca, 0x8b8cb5ec, + 0x8b8cbec3, 0x8c68c6a3, 0x8cf85cfa, 0x8cf85ecf, 0x8cf8cf2a, 0x8cfe38cf, + 0x8d86ed5e, 0x8db8ed5e, 0x8e312c81, 0x8e78ce73, 0x8f5efb8f, 0x8f8a35f8, + 0x8f8aef83, 0x8f8cf8a3, 0x8f8ef863, 0x8fce72ce, 0x912ba791, 0x919ec419, + 0x9419d416, 0x94f4ae9f, 0x9567b697, 0x9579cd79, 0x95a7d797, 0x95d79bd7, + 0x95d79d76, 0x95e69b69, 0x95f9bf9a, 0x96ada25a, 0x96b6e963, 0x9712d791, + 0x97c92cfd, 0x985e2592, 0x98be98b3, 0x98eae92a, 0x9a7aba35, 0x9a7d7973, + 0x9ae9fa2a, 0x9aed49ed, 0x9b69b6a3, 0x9bf94bfa, 0x9bf94dbf, 0x9bf9bf2a, + 0x9bfd39bf, 0x9c29cbec, 0x9c29ecde, 0x9c2bd9cb, 0x9c9bc4ba, 0x9c9bc4db, + 0x9c9bcdb3, 0x9d312b91, 0x9d79bd73, 0x9e96de4d, 0x9ec9de4d, 0x9f4dfc9f, + 0x9f9a34f9, 0x9f9adf93, 0x9f9bf9a3, 0x9f9df963, 0x9fbd72bd, 0xa27a2ded, + 0xa2de8da2, 0xa2ed9ea2, 0xa348e34e, 0xa359d35d, 0xa7b2a52b, 0xa7bae2ab, + 0xa7baea3b, 0xa7c2a42c, 0xa7cad2ac, 0xa7cada3c, 0xa7d4797d, 0xa7dfd5fd, + 0xa7e5787e, 0xa7efe4fe, 0xabeab5e1, 0xabeab9e3, 0xacdac4d1, 0xacdac8d3, + 0xaf8f5ef8, 0xaf9f4df9, 0xb2562b76, 0xb258b2db, 0xb2db7df5, 0xb41abeab, + 0xb6b4d96b, 0xb84abeab, 0xb8a2eab2, 0xb8fbefb3, 0xb96b596a, 0xbd7bde4d, + 0xbd7e5bde, 0xbd7ec2bd, 0xbdbed5e1, 0xbdbed95e, 0xbdbed9e3, 0xbeabea31, + 0xbef51bef, 0xbefb9ef3, 0xbefbef73, 0xbfbefb31, 0xbfe527e5, 0xc2462c76, + 0xc249c2ec, 0xc2ec7ef4, 0xc51acdac, 0xc6c5e86c, 0xc86c486a, 0xc95acdac, + 0xc9a2dac2, 0xc9fcdfc3, 0xcdacda31, 0xcdf41cdf, 0xcdfc8df3, 0xcdfcdf73, + 0xce7ced5e, 0xce7d4ced, 0xcecde4d1, 0xcecde84d, 0xcecde8d3, 0xcfcdfc31, + 0xcfd427d4, 0xd35b8d3d, 0xd425e7d4, 0xd427d4f9, 0xd79a7d2a, 0xd79bd74d, + 0xdad84cad, 0xdbf51fdf, 0xdcad9ca3, 0xdf597259, 0xe34c9e3e, 0xe527e5f8, + 0xe78a7e2a, 0xe78ce75e, 0xeae95bae, 0xebae8ba3, 0xecf41fef, 0xef487248, + 0x124914e912, 0x125815d812, 0x128f8ef812, 0x129f9df912, 0x141361c614, + 0x141712e714, 0x141e714ce7, 0x1461c614ec, 0x151361b615, 0x151712d715, + 0x151d715bd7, 0x1561b615db, 0x1714f4cf41, 0x1715f5bf51, 0x1b676be76b, + 0x1c676cd76c, 0x2142a2ea24, 0x2152a2da25, 0x21b21912db, 0x21c21812ec, + 0x2328fe38f8, 0x2329fd39f9, 0x234e3429e3, 0x235d3528d3, 0x242a2ea284, + 0x242cd427d4, 0x252a2da295, 0x252be527e5, 0x2628f8ef86, 0x2629f9df96, + 0x26b96b2e96, 0x26c86c2d86, 0x281832e318, 0x28b278be78, 0x28bfe2b8fb, + 0x291932d319, 0x29c279cd79, 0x29cfd2c9fc, 0x2ab212912b, 0x2ac212812c, + 0x2b258b278b, 0x2c249c279c, 0x2d427d497d, 0x2d4f924df4, 0x2dad6296ad, + 0x2e527e587e, 0x2e5f825ef5, 0x2eae6286ae, 0x2f4d724d24, 0x2f5e725e25, + 0x2fb872b82b, 0x2fc972c92c, 0x31813e31b8, 0x31913d31c9, 0x34b7e34e37, + 0x34f4cf4373, 0x35c7d35d37, 0x35f5bf5373, 0x381218c218, 0x391219b219, + 0x3d319a313d, 0x3d3237c23d, 0x3d3c23d9c2, 0x3e318a313e, 0x3e3237b23e, + 0x3e3b23e8b2, 0x41467c6174, 0x41471e7164, 0x416cd41614, 0x419c1d419c, + 0x424d4257d4, 0x42624962a4, 0x4262962432, 0x4262962d42, 0x42842784f2, + 0x42c7842784, 0x4327397343, 0x43a31ca343, 0x43a348ca34, 0x43ca349ca3, + 0x4528478424, 0x462ae42a24, 0x47397343c9, 0x473ac7a434, 0x4787f84cf8, + 0x4846c686ec, 0x48498e98b8, 0x49fbefb49f, 0x4a37937434, 0x4cf97f9c4f, + 0x4cfdfc7d4f, 0x4d4548c548, 0x4f47acf47a, 0x4f842ef482, 0x4f97bf94fb, + 0x51567b6175, 0x51571d7165, 0x516be51615, 
0x518b1e518b, 0x525e5247e5, + 0x52625862a5, 0x5262862532, 0x5262862e52, 0x52952795f2, 0x52b7952795, + 0x5327387353, 0x53a31ba353, 0x53a359ba35, 0x53ba358ba3, 0x5429579525, + 0x562ad52a25, 0x57387353b8, 0x573ab7a535, 0x5797f95bf9, 0x58fcdfc58f, + 0x5956b696db, 0x59589d89c9, 0x5a37837535, 0x5bf87f8b5f, 0x5bfefb7e5f, + 0x5e5459b459, 0x5f57abf57a, 0x5f87cf85fc, 0x5f952df592, 0x674fc674f4, + 0x675fb675f5, 0x678c685c67, 0x679b694b67, 0x6861a6ea68, 0x6862562768, + 0x68a6ea5e68, 0x6961a6da69, 0x6962462769, 0x69a6da4d69, 0x7185187c51, + 0x7194197b41, 0x73d767c67d, 0x73e767b67e, 0x7687357378, 0x7697347379, + 0x76d76c674d, 0x76e76b675e, 0x7842784e78, 0x7952795d79, 0x7adcad75ca, + 0x7aebae74ba, 0x7bd72bd52b, 0x7bdf57dbfd, 0x7ce72ce42c, 0x7cef47ecfe, + 0x7d79bd72bd, 0x7e78ce72ce, 0x7fdb27db7d, 0x7fec27ec7e, 0x81316e3181, + 0x81721c2181, 0x81e318ce31, 0x8468a6ea68, 0x847aca78a7, 0x8784284798, + 0x87a78ca738, 0x87a7ca7817, 0x897b82b878, 0x8b8aeaba5e, 0x8b8cb5cbdb, + 0x8efc2fce8f, 0x8fc2dfc8fd, 0x91316d3191, 0x91721b2191, 0x91d319bd31, + 0x9569a6da69, 0x957aba79a7, 0x9795295789, 0x97a79ba739, 0x97a7ba7917, + 0x987c92c979, 0x9c9adaca4d, 0x9c9bc4bcec, 0x9dfb2fbd9f, 0x9fb2efb9fe, + 0xa28fea28f8, 0xa29fda29f9, 0xa2beab9ea2, 0xa2cdac8da2, 0xaba356a3ab, + 0xaba79a72ab, 0xaca346a3ac, 0xaca78a72ac, 0xb232be321b, 0xb232e32b62, + 0xb2b87b82cb, 0xb616a516b6, 0xb6276e76b6, 0xbc2db7db2b, 0xbd5359bd35, + 0xbdbed9ed4d, 0xbf4efb7ef4, 0xc232cd321c, 0xc232d32c62, 0xc2c97c92bc, + 0xc616a416c6, 0xc6276d76c6, 0xcb2ec7ec2c, 0xce4348ce34, 0xcecde8de5e, + 0xcf5dfc7df5, 0xd717517da7, 0xd7db2db7ed, 0xda6a396ada, 0xda72a52ada, + 0xde74d24d7d, 0xdf528f5df8, 0xe717417ea7, 0xe7ec2ec7de, 0xea6a386aea, + 0xea72a42aea, 0xed75e25e7e, 0xef429f4ef9, 0x14161a6ea614, + 0x141714be7141, 0x1417a71ca714, 0x14b914be914b, 0x15161a6da615, + 0x151715cd7151, 0x1517a71ba715, 0x15c815cd815c, 0x1812cd812181, + 0x1912be912191, 0x2181a2aea218, 0x2191a2ada219, 0x232b2e32b9e3, + 0x232c2d32c8d3, 0x246249624e96, 0x24d429ed49ed, 0x256258625d86, + 0x25e528de58de, 0x28b298be98be, 0x29c289cd89cd, 0x31d31c613dc6, + 0x31e31b613eb6, 0x34379b437343, 0x343e93eb43e9, 0x35378c537353, + 0x353d83dc53d8, 0x3d326239623d, 0x3e326238623e, 0x436a96a3a439, + 0x437367c67343, 0x45c8457845c8, 0x47ecb4ecb7ec, 0x4842984e9842, + 0x49cb49cb79c7, 0x4a24ea249ea2, 0x4b46b496be96, 0x4d454c54d7d4, + 0x536a86a3a538, 0x537367b67353, 0x54b9547954b9, 0x57dbc5dbc7db, + 0x58bc58bc78b7, 0x5952895d8952, 0x5a25da258da2, 0x5c56c586cd86, + 0x5e545b45e7e5, 0x6b67e4b676b6, 0x6c67d5c676c6, 0x78a78ca785ca, + 0x79a79ba794ba, 0x813a31ca3181, 0x8ced8ced2ce2, 0x8d8ad8cad5ca, + 0x913a31ba3191, 0x9bde9bde2bd2, 0x9e9ae9bae4ba, + }; + + map[222] = { + 0x1af, 0x1232f, 0x1454a, 0x1676f, 0x1898a, 0x1abcb, 0x1aded, 0x1bcdb, + 0x1bdbf, 0x1cbec, 0x1cecf, 0x1dbed, 0x1ecde, 0x2362f, 0x3273f, 0x4584a, + 0x5495a, 0x6276f, 0x7367f, 0x8498a, 0x9589a, 0xa262f, 0xa373f, 0xa484f, + 0xa595f, 0x121712f, 0x1232898, 0x1232ded, 0x1234542, 0x1245d42, + 0x124d42f, 0x1254e52, 0x125e52f, 0x12bcb32, 0x131613f, 0x1345b43, + 0x134b43f, 0x1354c53, 0x135c53f, 0x141914a, 0x1423c24, 0x142c24a, + 0x1432e34, 0x143e34a, 0x1454676, 0x1454cec, 0x14bdb54, 0x151815a, + 0x1523b25, 0x152b25a, 0x1532d35, 0x153d35a, 0x1676ded, 0x1678986, + 0x1689d86, 0x168d86f, 0x1698e96, 0x169e96f, 0x16bcb76, 0x1789b87, + 0x178b87f, 0x1798c97, 0x179c97f, 0x1867c68, 0x186c68a, 0x1876e78, + 0x187e78a, 0x1898cec, 0x18bdb98, 0x1967b69, 0x196b69a, 0x1976d79, + 0x197d79a, 0x1abaeab, 0x1acadac, 0x1b252db, 0x1b434cb, 0x1b696db, + 0x1b878cb, 0x1befbef, 0x1c242ec, 0x1c535bc, 
0x1c686ec, 0x1c979bc, + 0x1cdfcdf, 0x1d353bd, 0x1d424ed, 0x1d797bd, 0x1d868ed, 0x1e343ce, + 0x1e525de, 0x1e787ce, 0x1e969de, 0x2324584, 0x2325495, 0x232848f, + 0x2328498, 0x2329589, 0x232959f, 0x234196b, 0x2345462, 0x235186c, + 0x2362bcb, 0x2389862, 0x238b28f, 0x238b298, 0x239c289, 0x239c29f, + 0x23b8cb2, 0x23c9bc2, 0x23ded62, 0x245462a, 0x245d462, 0x24d4f62, + 0x24dfc95, 0x254e562, 0x25e5f62, 0x25efb84, 0x262a898, 0x262bcdb, + 0x262cbec, 0x262dbdf, 0x262dbed, 0x262deda, 0x262ecde, 0x262ecef, + 0x26bae34, 0x26cad35, 0x26d42ed, 0x26e52de, 0x27a27af, 0x28b2a8f, + 0x28b2a98, 0x29c2a89, 0x29c2a9f, 0x2b8f5ec, 0x2bc8b2a, 0x2bcb62a, + 0x2c9f4db, 0x2cb9c2a, 0x324197d, 0x3245473, 0x325187e, 0x3273ded, + 0x3289873, 0x328d38f, 0x328d398, 0x329e389, 0x329e39f, 0x32bcb73, + 0x32d8ed3, 0x32e9de3, 0x345473a, 0x345b473, 0x34b4f73, 0x34bfe95, + 0x354c573, 0x35c5f73, 0x35cfd84, 0x36a36af, 0x373a898, 0x373bcba, + 0x373bdbf, 0x373bdcb, 0x373cebc, 0x373cecf, 0x373dbed, 0x373ecde, + 0x37b43cb, 0x37c53bc, 0x37dac24, 0x37eab25, 0x38d3a8f, 0x38d3a98, + 0x39e3a89, 0x39e3a9f, 0x3d8f5ce, 0x3de8d3a, 0x3ded73a, 0x3e9f4bd, + 0x3ed9e3a, 0x423c284, 0x42c2a84, 0x432e384, 0x43e3a84, 0x452178b, + 0x453168d, 0x4546276, 0x4547367, 0x4567684, 0x456b46a, 0x456b476, + 0x457d467, 0x457d47a, 0x4584bdb, 0x45b6db4, 0x45cec84, 0x45d7bd4, + 0x467684f, 0x46b4f6a, 0x46b4f76, 0x47d4f67, 0x47d4f7a, 0x484abcb, + 0x484aded, 0x484bcdb, 0x484cbec, 0x484cecf, 0x484dbed, 0x484edce, + 0x48c24ec, 0x48e34ce, 0x49f49fa, 0x4b6a3ed, 0x4bd6b4f, 0x4bdb84f, + 0x4d7a2cb, 0x4db7d4f, 0x523b295, 0x52b2a95, 0x532d395, 0x53d3a95, + 0x542179c, 0x543169e, 0x5467695, 0x546c56a, 0x546c576, 0x547e567, + 0x547e57a, 0x5495cec, 0x54bdb95, 0x54c6ec5, 0x54e7ce5, 0x567695f, + 0x56c5f6a, 0x56c5f76, 0x57e5f67, 0x57e5f7a, 0x58f58fa, 0x595abcb, + 0x595aded, 0x595bcdb, 0x595bdbf, 0x595cbec, 0x595debd, 0x595ecde, + 0x59b25db, 0x59d35bd, 0x5c6a3de, 0x5ce6c5f, 0x5cec95f, 0x5e7a2bc, + 0x5ec7e5f, 0x6276898, 0x6276bcb, 0x6289d86, 0x628d86f, 0x6298e96, + 0x629e96f, 0x62bae78, 0x62cad79, 0x62d86ed, 0x62ded76, 0x62e96de, + 0x6768498, 0x6769589, 0x678152b, 0x679142c, 0x67b4cb6, 0x67c5bc6, + 0x68dfc59, 0x69efb48, 0x6b4f9ec, 0x6bc4b6a, 0x6c5f8db, 0x6cb5c6a, + 0x7367898, 0x7367ded, 0x7389b87, 0x738b87f, 0x7398c97, 0x739c97f, + 0x73b87cb, 0x73bcb67, 0x73c97bc, 0x73dac68, 0x73eab69, 0x768153d, + 0x769143e, 0x76d4ed7, 0x76e5de7, 0x78bfe59, 0x79cfd48, 0x7d4f9ce, + 0x7de4d7a, 0x7e5f8bd, 0x7ed5e7a, 0x8467c68, 0x846c68a, 0x8476e78, + 0x847e78a, 0x8498bdb, 0x84c68ec, 0x84cec98, 0x84e78ce, 0x896134b, + 0x897124d, 0x89b2db8, 0x89d3bd8, 0x8b2a7ed, 0x8bd2b8f, 0x8d3a6cb, + 0x8db3d8f, 0x9567b69, 0x956b69a, 0x9576d79, 0x957d79a, 0x9589cec, + 0x95b69db, 0x95bdb89, 0x95d79bd, 0x986135c, 0x987125e, 0x98c2ec9, + 0x98e3ce9, 0x9c2a7de, 0x9ce2c9f, 0x9e3a6bc, 0x9ec3e9f, 0xab2562b, + 0xab6296b, 0xac2462c, 0xac6286c, 0xad3573d, 0xad7397d, 0xae3473e, + 0xae7387e, 0xb2562db, 0xb4384cb, 0xb4834bf, 0xb6296db, 0xb8478bf, + 0xb8478cb, 0xc2462ec, 0xc5395bc, 0xc5935cf, 0xc6286ec, 0xc9579bc, + 0xc9579cf, 0xd3573bd, 0xd4284ed, 0xd4824df, 0xd7397bd, 0xd8468df, + 0xd8468ed, 0xe3473ce, 0xe5295de, 0xe5925ef, 0xe7387ce, 0xe9569de, + 0xe9569ef, 0x12189d812, 0x12198e912, 0x121d812ed, 0x121d8df12, + 0x121ded712, 0x121e912de, 0x121e9ef12, 0x123249149, 0x123258158, + 0x124191d42, 0x1242c2d42, 0x124efe4f2, 0x125181e52, 0x1252b2e52, + 0x125dfd5f2, 0x128157128, 0x128712e78, 0x129147129, 0x129712d79, + 0x12bcb7127, 0x13189b813, 0x13198c913, 0x131b813cb, 0x131b8bf13, + 0x131bcb613, 0x131c913bc, 0x131c9cf13, 0x134191b43, 0x1343e3b43, + 
0x134cfc4f3, 0x135181c53, 0x1353d3c53, 0x135bfb5f3, 0x138156138, + 0x138613c68, 0x139146139, 0x139613b69, 0x13ded6136, 0x14167c614, + 0x14176e714, 0x141c614ec, 0x141c6ca14, 0x141cec914, 0x141e714ce, + 0x141e7ea14, 0x142171c24, 0x142a2ea24, 0x143161e34, 0x143a3ca34, + 0x145427127, 0x145436136, 0x146914e96, 0x147914c97, 0x14bdb9149, + 0x15167b615, 0x15176d715, 0x151b615db, 0x151b6ba15, 0x151bdb815, + 0x151d715bd, 0x151d7da15, 0x152171b25, 0x152a2da25, 0x153161d35, + 0x153a3ba35, 0x156815d86, 0x157815b87, 0x15cec8158, 0x16145d416, + 0x16154e516, 0x161d416ed, 0x161d4df16, 0x161e516de, 0x161e5ef16, + 0x167641914, 0x167651815, 0x1686c6d86, 0x168efe8f6, 0x1696b6e96, + 0x169dfd9f6, 0x17145b417, 0x17154c517, 0x171b417cb, 0x171b4bf17, + 0x171c517bc, 0x171c5cf17, 0x1787e7b87, 0x178cfc8f7, 0x1797d7c97, + 0x179bfb9f7, 0x18123c218, 0x18132e318, 0x181c218ec, 0x181c2ca18, + 0x181e318ce, 0x181e3ea18, 0x186a6ea68, 0x187a7ca78, 0x189821712, + 0x189831613, 0x19123b219, 0x19132d319, 0x191b219db, 0x191b2ba19, + 0x191d319bd, 0x191d3da19, 0x196a6da69, 0x197a7ba79, 0x232b2562b, + 0x232b6296b, 0x232c2462c, 0x232c6286c, 0x232d3573d, 0x232d7397d, + 0x232e3473e, 0x232e7387e, 0x232f49f49, 0x232f58f58, 0x234c249c2, + 0x235b258b2, 0x238fc28f8, 0x239fb29f9, 0x23d62535d, 0x23d76c23d, + 0x23e62434e, 0x23e76b23e, 0x24237c243, 0x242c26d42, 0x242cd4284, + 0x243796243, 0x24547a27a, 0x2459d4259, 0x248795248, 0x24c249c2a, + 0x24d542484, 0x24dc249c2, 0x24f4ef462, 0x25237b253, 0x252b26e52, + 0x252be5295, 0x253786253, 0x2548e5248, 0x25b258b2a, 0x25e452595, + 0x25eb258b2, 0x25f5df562, 0x26286c768, 0x262876e78, 0x26296b769, + 0x262976d79, 0x262abaeab, 0x262acadac, 0x262fbefbe, 0x262fcdfcd, + 0x27a27a898, 0x27a27abcb, 0x27a27aded, 0x28b28bcdb, 0x28b28dbed, + 0x28b2ce8bc, 0x28fc2a8f8, 0x29c29cbec, 0x29c29ecde, 0x29c2bd9cb, + 0x29fb2a9f9, 0x2a28da28f, 0x2a28da298, 0x2a29ea289, 0x2a29ea29f, + 0x2a7b2a52b, 0x2a7bae2ab, 0x2a7c2a42c, 0x2a7cad2ac, 0x2ade8da2a, + 0x2aed9ea2a, 0x2b2562b76, 0x2b258b2db, 0x2b8ab2eab, 0x2bd7ec2bd, + 0x2c2462c76, 0x2c249c2ec, 0x2c9ac2dac, 0x2d42d8498, 0x2d7a297da, + 0x2e52e9589, 0x2e7a287ea, 0x324e349e3, 0x325d358d3, 0x328fe38f8, + 0x329fd39f9, 0x343e37b43, 0x343eb4384, 0x34546a36a, 0x3459b4359, + 0x348695348, 0x34b543484, 0x34be349e3, 0x34e349e3a, 0x34f4cf473, + 0x353d37c53, 0x353dc5395, 0x3548c5348, 0x35c453595, 0x35cd358d3, + 0x35d358d3a, 0x35f5bf573, 0x36a36a898, 0x36a36abcb, 0x36a36aded, + 0x373867c68, 0x37387e678, 0x373967b69, 0x37397d679, 0x373abeabe, + 0x373acdacd, 0x373fbfefb, 0x373fcfdfc, 0x38d38bdcb, 0x38d38debd, + 0x38d3ec8de, 0x38fe3a8f8, 0x39e39cebc, 0x39e39edce, 0x39e3db9ed, + 0x39fd3a9f9, 0x3a38ba38f, 0x3a38ba398, 0x3a39ca389, 0x3a39ca39f, + 0x3a6d3a53d, 0x3a6dac3ad, 0x3a6e3a43e, 0x3a6eab3ae, 0x3abc8ba3a, + 0x3acb9ca3a, 0x3b43b8498, 0x3b6a396ba, 0x3c53c9589, 0x3c6a386ca, + 0x3d3573d67, 0x3d358d3bd, 0x3d8ad3cad, 0x3db6ce3db, 0x3e3473e67, + 0x3e349e3ce, 0x3e9ae3bae, 0x42a2ea284, 0x42cd427d4, 0x42d4257d4, + 0x42d427d4f, 0x43a3ca384, 0x43b4356b4, 0x43b436b4f, 0x43eb436b4, + 0x454b8478b, 0x454c9579c, 0x454d8468d, 0x454e9569e, 0x456ad46a6, + 0x457ab47a7, 0x45c98d45c, 0x45e98b45e, 0x46ad4f6a6, 0x46b46bdcb, + 0x46b46cbec, 0x46b4de6bd, 0x47ab4f7a7, 0x47d47dbed, 0x47d47edce, + 0x47d4bc7db, 0x48468d986, 0x484698e96, 0x48478b987, 0x484798c97, + 0x484abaeab, 0x484acadac, 0x484fbfefb, 0x484fcdfcd, 0x49f46769f, + 0x49f49cecf, 0x49f49fbdb, 0x4a6cfc64f, 0x4a7efe74f, 0x4b436b4cb, + 0x4b6fb4efb, 0x4bc9ed4bc, 0x4c9f479cf, 0x4cfd9f4cf, 0x4d427d4ed, + 0x4d7fd4cfd, 0x4e9f469ef, 0x4efb9f4ef, 0x4f46cf476, 0x4f47ef467, + 
0x4f9b4f34b, 0x4f9d4f24d, 0x4fce6cf4f, 0x4fec7ef4f, 0x52a2da295, + 0x52be527e5, 0x52e5247e5, 0x52e527e5f, 0x53a3ba395, 0x53c5346c5, + 0x53c536c5f, 0x53dc536c5, 0x546ae56a6, 0x547ac57a7, 0x56ae5f6a6, + 0x56c56bcdb, 0x56c56cebc, 0x56c5ed6ce, 0x57ac5f7a7, 0x57e57debd, + 0x57e57ecde, 0x57e5cb7ec, 0x58f56768f, 0x58f58bdbf, 0x58f58fcec, + 0x595689d86, 0x59569e896, 0x595789b87, 0x59579c897, 0x595abaeab, + 0x595acadac, 0x595fbefbe, 0x595fcfdfc, 0x5a6bfb65f, 0x5a7dfd75f, + 0x5b8f578bf, 0x5bfe8f5bf, 0x5c536c5bc, 0x5c6fc5dfc, 0x5cb8de5cb, + 0x5d8f568df, 0x5dfc8f5df, 0x5e527e5de, 0x5e7fe5bfe, 0x5f56bf576, + 0x5f57df567, 0x5f8c5f35c, 0x5f8e5f25e, 0x5fbd6bf5f, 0x5fdb7df5f, + 0x6286c6d86, 0x628f8ef86, 0x6296b6e96, 0x629f9df96, 0x678c685c6, + 0x679b694b6, 0x686cd8648, 0x68c685c6a, 0x68dc685c6, 0x696be9659, + 0x69b694b6a, 0x69eb694b6, 0x6ade4da6a, 0x6aed5ea6a, 0x6b46bd96b, + 0x6b4ab6eab, 0x6c56ce86c, 0x6c5ac6dac, 0x7387e7b87, 0x738f8cf87, + 0x7397d7c97, 0x739f9bf97, 0x768e785e7, 0x769d794d7, 0x787eb8748, + 0x78be785e7, 0x78e785e7a, 0x797dc9759, 0x79cd794d7, 0x79d794d7a, + 0x7abc4ba7a, 0x7acb5ca7a, 0x7d47db97d, 0x7d4ad7cad, 0x7e57ec87e, + 0x7e5ae7bae, 0x846a6ea68, 0x847a7ca78, 0x86cd863d8, 0x86d863d8f, + 0x86d8693d8, 0x87b872b8f, 0x87b8792b8, 0x87eb872b8, 0x8b28bc78b, + 0x8b2fb8efb, 0x8d38de68d, 0x8d3fd8cfd, 0x8fce2cf8f, 0x8fec3ef8f, + 0x956a6da69, 0x957a7ba79, 0x96be963e9, 0x96e963e9f, 0x96e9683e9, + 0x97c972c9f, 0x97c9782c9, 0x97dc972c9, 0x9c29cb79c, 0x9c2fc9dfc, + 0x9e39ed69e, 0x9e3fe9bfe, 0x9fbd2bf9f, 0x9fdb3df9f, 0xaba3573ab, + 0xaba7397ab, 0xaca3473ac, 0xaca7387ac, 0xada2562ad, 0xada6296ad, + 0xaea2462ae, 0xaea6286ae, 0xb258b278b, 0xb436b496b, 0xbf5935fbf, + 0xbf9579fbf, 0xc249c279c, 0xc536c586c, 0xcf4834fcf, 0xcf8478fcf, + 0xd358d368d, 0xd427d497d, 0xdf5925fdf, 0xdf9569fdf, 0xe349e369e, + 0xe527e587e, 0xef4824fef, 0xef8468fef, 0x1214914e912, + 0x1215815d812, 0x1218ef812ef, 0x1219df912df, 0x1314914c913, + 0x1315815b813, 0x1318cf813cf, 0x1319bf913bf, 0x1412712e714, + 0x1413613c614, 0x1416a6ea614, 0x1417a7ca714, 0x1512712d715, + 0x1513613b615, 0x1516a6da615, 0x1517a7ba715, 0x1614d914916, + 0x1614ef416ef, 0x1615df516df, 0x1615e815816, 0x1714b914917, + 0x1714cf417cf, 0x1715bf517bf, 0x1715c815817, 0x1812a2ea218, + 0x1812c712718, 0x1813a3ca318, 0x1813e613618, 0x1912a2da219, + 0x1912b712719, 0x1913a3ba319, 0x1913d613619, 0x1b2b32be32b, + 0x1b4b54be54b, 0x1b6b76be76b, 0x1b8b98be98b, 0x1c2c32cd32c, + 0x1c5c45cd45c, 0x1c6c76cd76c, 0x1c9c89cd89c, 0x23426249624, + 0x23526258625, 0x23b23e38b2b, 0x23c23d39c2c, 0x24262962d42, + 0x242842784f2, 0x24284784254, 0x242c7842784, 0x25262862e52, + 0x252952795f2, 0x25295795245, 0x252b7952795, 0x26b232be32b, + 0x26c232cd32c, 0x27842784e78, 0x27952795d79, 0x27db27db97d, + 0x27ec27ec87e, 0x2a28fea28f8, 0x2a29fda29f9, 0x2abeab9ea2a, + 0x2acdac8da2a, 0x2b257db27db, 0x2b2db27dbf2, 0x2b2db7db2cb, + 0x2c247ec27ec, 0x2c2ec27ecf2, 0x2c2ec7ec2bc, 0x32437349734, + 0x32537358735, 0x32d32c28d3d, 0x32e32b29e3e, 0x34373973b43, + 0x343843684f3, 0x34384684354, 0x343e6843684, 0x35373873c53, + 0x353953695f3, 0x35395695345, 0x353d6953695, 0x36843684c68, + 0x36953695b69, 0x36bd36bd96b, 0x36ce36ce86c, 0x37d323dc23d, + 0x37e323eb23e, 0x3a38fca38f8, 0x3a39fba39f9, 0x3adcad9ca3a, + 0x3aebae8ba3a, 0x3d356bd36bd, 0x3d3bd36bdf3, 0x3d3bd6bd3ed, + 0x3e346ce36ce, 0x3e3ce36cef3, 0x3e3ce6ce3de, 0x424624962a4, + 0x434734973a4, 0x45b45e56b4b, 0x45d45c57d4d, 0x46249624e96, + 0x47349734c97, 0x48b454be54b, 0x48d454dc54d, 0x49cb49cb79c, + 0x49ed49ed69e, 0x4a6aef4efa6, 0x4a7acf4cfa7, 0x4abc94bc4b4, + 0x4ade94de4d4, 
0x4b439cb49cb, 0x4b4cb9cb4db, 0x4d429ed49ed, + 0x4d4ed9ed4bd, 0x4fbefb7ef4f, 0x4fdcfd6cf4f, 0x525625862a5, + 0x535735873a5, 0x54c54d46c5c, 0x54e54b47e5e, 0x56258625d86, + 0x57358735b87, 0x58bc58bc78b, 0x58de58de68d, 0x59c545cd45c, + 0x59e545eb45e, 0x5a6adf5dfa6, 0x5a7abf5bfa7, 0x5acb85cb5c5, + 0x5aed85ed5e5, 0x5c538bc58bc, 0x5c5bc8bc5ec, 0x5e528de58de, + 0x5e5de8de5ce, 0x5fcdfc7df5f, 0x5febfe6bf5f, 0x62b676be76b, + 0x62c676cd76c, 0x67862562868, 0x67962462969, 0x67b67e74b6b, + 0x67c67d75c6c, 0x68648348698, 0x69659359689, 0x6abeab5ea6a, + 0x6acdac4da6a, 0x6b6db3db6cb, 0x6c6ec3ec6bc, 0x73d767dc67d, + 0x73e767eb67e, 0x76873573878, 0x76973473979, 0x76d76c64d7d, + 0x76e76b65e7e, 0x78748248798, 0x79759259789, 0x7adcad5ca7a, + 0x7aebae4ba7a, 0x7d7bd2bd7ed, 0x7e7ce2ce7de, 0x84b898be98b, + 0x84d898dc98d, 0x89b89e92b8b, 0x89d89c93d8d, 0x8b8cb5cb8db, + 0x8d8ed5ed8bd, 0x8fbefb3ef8f, 0x8fdcfd2cf8f, 0x95c989cd89c, + 0x95e989eb89e, 0x98c98d82c9c, 0x98e98b83e9e, 0x9c9bc4bc9ec, + 0x9e9de4de9ce, 0x9fcdfc3df9f, 0x9febfe2bf9f, 0xaba27a297ab, + 0xaba356a36ab, 0xaca27a287ac, 0xaca346a36ac, 0xada257a27ad, + 0xada36a396ad, 0xaea247a27ae, 0xaea36a386ae, 0xbf49f479fbf, + 0xbf85f835fbf, 0xcf58f578fcf, 0xcf94f934fcf, 0xdf49f469fdf, + 0xdf85f825fdf, 0xef58f568fef, 0xef94f924fef, + }; + // clang-format on + return map; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp new file mode 100644 index 0000000000..b80c5f1be1 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp @@ -0,0 +1,105 @@ +#ifndef _TKET_TokenSwapping_TableLookup_SwapSequenceTable_H_ +#define _TKET_TokenSwapping_TableLookup_SwapSequenceTable_H_ + +#include +#include +#include + +namespace tket { +namespace tsa_internal { + +/** For swaps on vertices {0,1,2,...,5}, return precomputed short swap + * sequences using given sets of edges. Should be close to optimal. + * (Every sequence should have the joint-shortest length amongst all sequences + * using those particular swaps, but not every possible sequence is included). + * + * (The only possibility of non-optimality is that some solutions + * using many edges might be missing. It was constructed using breadth-first + * searches of all possible sequences up to certain depths on various graphs + * with <= 6 vertices. Due to time/space limitations some non-complete graphs + * were searched as well as complete graphs K4, K5, K6. + * + * Of course, ideally we'd search K6 up to depth 16, but searching up to depth 9 + * already consumed ~30 mins of CPU time and most of the memory capacity of an + * ordinary laptop. More efficient exhaustive search algorithms with clever + * pruning might cut it down a bit, but (since each added depth increases the + * difficulty roughly by a factor of 14) it would require significant + * computational effort to reach even depth 12 for K6, and depth 16 probably + * requires a supercomputer, or a very large distributed computation). + * + * The table size is far smaller than the precomputation needed to create it. + * The creation considered millions of sequences, but the table has only a few + * thousand entries. + * + * The table currently contains all swap sequences of length: + * <= 12 on 4 vertices (K4, depth 12); + * <= 10 on 5 vertices (K5, depth 10); + * <= 9 on 6 vertices (K6, depth 9); + * <= 12 on cycles with <= 6 vertices (C5, C6); + * <= 12 on a few other special graphs with 6 vertices. 
+ *
+ * Superficially redundant solutions have been removed:
+ *
+ * (a): If sequences S1, S2 have equal length but the edge set E1 is a subset
+ * of E2, keep only S1, since every graph allowing S2 would also allow S1.
+ *
+ * (b): If sequences S1, S2 have len(S1) < len(S2), keep S2 exactly when E2 is
+ * NOT a subset of E1 (since, in that case, there are graphs containing E2
+ * which do NOT contain E1, so that S2 may be possible when S1 is impossible).
+ *
+ * Finally, to save space, every sequence was checked before insertion, and
+ * inserted ONLY if its inverse was not already present in the table (since
+ * inverting permutations is trivial for swaps: just reverse the order). Hence,
+ * the table is only about half the size that it would otherwise be.
+ *
+ * But, whilst these sequences are universally valid,
+ * this class knows nothing about HOW to look up results in the table
+ * efficiently. The current lookup algorithms are quite crude (but actually
+ * faster than fancier algorithms for this table size), but there is some
+ * possibility of speedup (although not result improvements) if a really fancy
+ * search/filtering algorithm can be found.
+ *
+ * NOTE: the format is reasonable, but still not as compressed as possible;
+ * it still contains multiple isomorphic entries. A more complicated hashing
+ * scheme is required to cut down on these isomorphic copies. (E.g., perm hash
+ * 2, meaning the mapping 0->1, 1->0, i.e. (01), contains 0x262, 0x484, 0x737,
+ * meaning swap sequences [02 12 02], [04 14 04], [13 03 13]. It is easily seen
+ * that all 3 are isomorphic. The first two are of the form [ab cb ab] == [ac],
+ * and the third has the form [ab cb ab] == [ca].) It seems like we'd need a
+ * scheme involving integer hashing of graphs, with few isomorphic collisions,
+ * but such algorithms need to be pretty simple and fast or they're not worth
+ * doing except for much larger table sizes.
+ */
+struct SwapSequenceTable {
+  /** The integer type used to encode a swap sequence on vertices
+   * {0,1,2,3,4,5}.
+   */
+  typedef std::uint_fast64_t Code;
+
+  /** The KEY is a "permutation hash", i.e. a number representing a permutation
+   * on {0,1,2,3,4,5}. (Not all possible permutations are represented, though;
+   * suitable vertex relabelling changes many different permutations to the
+   * same hash).
+   *
+   * See CanonicalRelabelling.hpp, SwapConversion.hpp for more explanation.
+   *
+   * The VALUE is a list of integers encoding a swap sequence, which all induce
+   * the permutation on {0,1,2,3,4,5} with the given hash.
+   * (Obviously, different sequences are allowed, because some swaps might not
+   * be possible, i.e. the graph might be incomplete).
+   */
+  typedef std::map<unsigned, std::vector<Code>> Table;
+
+  /** The actual large precomputed table. The entries are already sorted
+   * and duplications/redundancies/suboptimality have been removed.
+   * However, currently this raw data is processed by
+   * FilteredSwapSequences, which tolerates such imperfections.
+   * Thus it is easy to add more sequences to the table without worrying
+   * about them (as long as the newly added data is actually correct).
+   * @return A large precomputed raw table of data.
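+   *
+   * As a purely illustrative sketch (not part of the tket API): each hex
+   * digit of a Code appears to encode one of the 15 possible swaps on
+   * {0,1,2,3,4,5}, as in the worked example above (0x262 <-> [02 12 02]).
+   * Assuming that convention, and a hypothetical helper
+   * get_swap_from_nibble() (the real conversion lives in SwapConversion.hpp;
+   * the nibble read order below is an assumption), a Code could be decoded
+   * roughly like this:
+   *
+   *   std::vector<Swap> decode(SwapSequenceTable::Code code) {
+   *     std::vector<Swap> swaps;
+   *     for (; code != 0; code >>= 4) {
+   *       // Lowest 4 bits give one swap, e.g. 0x2 -> swap(0,2), 0x6 -> swap(1,2).
+   *       swaps.push_back(get_swap_from_nibble(code & 0xF));
+   *     }
+   *     return swaps;
+   *   }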
+ */ + static Table get_table(); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp new file mode 100644 index 0000000000..5c1e83e7f4 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -0,0 +1,157 @@ +#include "VertexMapResizing.hpp" + +#include +#include + +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +VertexMapResizing::VertexMapResizing(NeighboursInterface& neighbours) + : m_neighbours(neighbours) {} + +const vector& VertexMapResizing::operator()(size_t vertex) { + const auto citer = m_cached_neighbours.find(vertex); + if (citer != m_cached_neighbours.cend()) { + return citer->second; + } + auto& list = m_cached_neighbours[vertex]; + list = m_neighbours(vertex); + for (auto other_v : list) { + m_cached_full_edges.insert(get_swap(vertex, other_v)); + } + return list; +} + +const VertexMapResizing::Result& VertexMapResizing::resize_mapping( + VertexMapping& mapping, unsigned desired_size) { + m_result.success = false; + m_result.edges.clear(); + if (mapping.size() > desired_size) { + for (auto infinite_loop_guard = 1 + mapping.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = mapping.size(); + remove_vertex(mapping); + const auto new_size = mapping.size(); + if (new_size <= desired_size) { + fill_result_edges(mapping); + m_result.success = true; + return m_result; + } + if (old_size <= new_size) { + return m_result; + } + } + TKET_ASSERT(!"VertexMapResizing::resize_mapping"); + } + TKET_ASSERT(mapping.size() <= desired_size); + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + desired_size; infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = mapping.size(); + if (old_size >= desired_size) { + terminated_correctly = true; + break; + } + add_vertex(mapping); + const auto new_size = mapping.size(); + if (old_size == new_size) { + // Couldn't add a vertex. + terminated_correctly = true; + break; + } + // Must have added exactly one vertex. + TKET_ASSERT(old_size + 1 == new_size); + } + TKET_ASSERT(terminated_correctly); + // It's acceptable to have too few vertices, + // it can still be looked up in the table. + m_result.success = true; + fill_result_edges(mapping); + return m_result; +} + +size_t VertexMapResizing::get_edge_count( + const VertexMapping& mapping, size_t vertex) { + const auto& neighbours = operator()(vertex); + return std::count_if( + neighbours.cbegin(), neighbours.cend(), + // Note that "neighbours" automatically will not contain "vertex" itself. + [&mapping](size_t vertex) { return mapping.count(vertex) != 0; }); +} + +void VertexMapResizing::add_vertex(VertexMapping& mapping) { + std::set new_vertices; + + // Multipass, maybe a bit inefficient, but doesn't matter. + // After a few calls, it's just map lookup so not so bad. + for (const auto& existing_vertex_pair : mapping) { + // A valid mapping should have the same source/target vertices, + // so don't need to consider .second. + const auto& neighbours = operator()(existing_vertex_pair.first); + for (auto vv : neighbours) { + if (mapping.count(vv) == 0) { + new_vertices.insert(vv); + } + } + } + + // Now find the new vertex which would add the largest number of new edges. 
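+  // (Greedy choice: only vertices adjacent to the current mapping are
+  // candidates; the one contributing the most edges wins, with ties going to
+  // the smallest vertex number since new_vertices is an ordered set. If no
+  // candidate contributes any edge, nothing is added.)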
+ size_t maximum_new_edges = 0; + size_t best_new_vertex = std::numeric_limits::max(); + + for (auto new_v : new_vertices) { + const auto edge_count = get_edge_count(mapping, new_v); + if (edge_count > maximum_new_edges) { + best_new_vertex = new_v; + maximum_new_edges = edge_count; + } + } + if (maximum_new_edges > 0) { + mapping[best_new_vertex] = best_new_vertex; + } +} + +void VertexMapResizing::remove_vertex(VertexMapping& mapping) { + const auto invalid_number_of_edges = std::numeric_limits::max(); + + // We want to leave as many edges as possible, + // so we remove the minimum number. + size_t minimum_edges_removed = invalid_number_of_edges; + size_t best_vertex = std::numeric_limits::max(); + for (const auto& existing_vertex_pair : mapping) { + if (existing_vertex_pair.first != existing_vertex_pair.second) { + // The vertex is not fixed, so we cannot remove it. + continue; + } + const auto edge_count = get_edge_count(mapping, existing_vertex_pair.first); + if (edge_count < minimum_edges_removed) { + best_vertex = existing_vertex_pair.first; + minimum_edges_removed = edge_count; + } + } + if (minimum_edges_removed < invalid_number_of_edges) { + TKET_ASSERT(mapping.at(best_vertex) == best_vertex); + TKET_ASSERT(mapping.erase(best_vertex) == 1); + } +} + +void VertexMapResizing::fill_result_edges(const VertexMapping& mapping) { + m_result.edges.clear(); + for (auto citer1 = mapping.cbegin(); citer1 != mapping.cend(); ++citer1) { + auto citer2 = citer1; + for (++citer2; citer2 != mapping.cend(); ++citer2) { + const auto edge = get_swap(citer1->first, citer2->first); + if (m_cached_full_edges.count(edge) != 0) { + m_result.edges.push_back(edge); + } + } + } +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp new file mode 100644 index 0000000000..a049bb13c1 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp @@ -0,0 +1,108 @@ +#ifndef _TKET_TokenSwapping_TableLookup_VertexMapResizing_H_ +#define _TKET_TokenSwapping_TableLookup_VertexMapResizing_H_ + +#include +#include +#include +#include + +#include "../NeighboursInterface.hpp" +#include "../TSAUtils/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** If a vertex mapping { u -> v } has too few vertices, try to add extra + * vertices, fixed by the new mapping, to get to the desired size. This may + * allow extra optimisations to be found in the table. E.g., imagine a vertex in + * a graph which is not moved by the mapping. Imagine that removing it makes the + * graph disconnected. If the desired mapping moves a token + * between different components, it is then impossible for any swap + * sequence within the subgraph to perform that mapping. + * However, adding the vertex back makes it possible. + * + * If instead there are too many vertices to look up in the table, it tries + * to remove vertices which are fixed by the mapping to get it down to size. + */ +class VertexMapResizing : public NeighboursInterface { + public: + /** Store a Neighbours object, to be used throughout when required to find + * all neighbours of a given vertex. The caller must ensure that the + * object remains valid. + * @param neighbours The object to calculate neighbours of a vertex. + */ + explicit VertexMapResizing(NeighboursInterface& neighbours); + + /** Gets the data by calling the NeighboursInterface object which was passed + * into the constructor. 
HOWEVER, it does internal caching, so doesn't call it + * multiple times. + * @param vertex A vertex in the graph. + * @return A cached list of neighbours of that vertex, stored internally. + */ + virtual const std::vector& operator()(size_t vertex) override; + + /** The result of resizing a mapping by deleting fixed vertices if too big, + * or adding new vertices if too small. + */ + struct Result { + /** It is still a success if we have fewer vertices than the desired number + * (as this can still be looked up in the table). However, it's a failure if + * there are too many vertices (which than cannot be looked up). + */ + bool success; + + /** If successful, the edges of the subgraph containing only the vertices in + * the new mapping. */ + std::vector edges; + }; + + /** The mapping may be altered, even upon failure, so obviously the caller + * should make a copy if it needs to be preserved. Increase the map size as + * much as possible if too small (still a success even if it cannot reach the + * size). Decrease the size if too large (and not reaching the szie is then a + * failure). Newly added or removed vertices are all fixed, i.e. map[v]=v. + * @param mapping The mapping which will be altered and returned by reference. + * @param desired_size The size we wish to reach, or as close as possible if + * the mapping is currently too small. + */ + const Result& resize_mapping( + VertexMapping& mapping, unsigned desired_size = 6); + + private: + NeighboursInterface& m_neighbours; + Result m_result; + + // KEY: a vertex. VALUE: all its neighbours. + std::map> m_cached_neighbours; + std::set m_cached_full_edges; + + /** How many edges join the given vertex to other existing vertices? + * @param mapping The current vertex permutation which we may expand or + * contract. + * @param vertex A vertex which may or may not be already within the mapping. + * @return The total number of edges within the LARGER graph joining the + * vertex to other vertices within the mapping. + */ + size_t get_edge_count(const VertexMapping& mapping, size_t vertex); + + /** Try to add a single new fixed vertex to the mapping, i.e. a new v with + * map[v]=v. + * @param mapping The current vertex permutation which we wish to expand by + * one vertex. + */ + void add_vertex(VertexMapping& mapping); + + /** Try to remove a single vertex within the mapping, but only if it is fixed, + * i.e. map[v]==v. + * @param mapping The current vertex permutation which we wish to shrink by + * one vertex. + */ + void remove_vertex(VertexMapping& mapping); + + /** Within the m_result object, fill "edges" for the new mapping. */ + void fill_result_edges(const VertexMapping& mapping); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp new file mode 100644 index 0000000000..3e1006417d --- /dev/null +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -0,0 +1,292 @@ +#include "TrivialTSA.hpp" + +#include +#include + +#include "CyclicShiftCostEstimate.hpp" +#include "TSAUtils/DebugFunctions.hpp" +#include "TSAUtils/DistanceFunctions.hpp" +#include "TSAUtils/GeneralFunctions.hpp" +#include "TSAUtils/VertexSwapResult.hpp" +#include "Utils/Assert.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { + +// Make an arrow from each nonempty vertex to its target; +// what are the connected components of the resulting directed graph? +// Two different arrows cannot point INTO the same vertex. 
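+// (The desired mapping is injective, i.e. no two tokens share a target
+// vertex, so following the arrows forwards never branches.)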
+// So, EITHER a cycle (so, a, abstract cyclic shift on tokens is performed), +// OR a path, with all except the final vertex being nonempty. +// In either case, we enact a cyclic shift. + +// To find a component, we might have to go backwards along arrows +// as well as forwards. + +TrivialTSA::TrivialTSA(Options options) : m_options(options) { + m_name = "Trivial"; +} + +void TrivialTSA::set(Options options) { m_options = options; } + +bool TrivialTSA::grow_cycle_forwards( + const VertexMapping& vertex_mapping, Endpoints& endpoints) { + auto current_id = endpoints.first; + const auto start_vertex = m_abstract_cycles_vertices.at(current_id); + + // If valid, a single cycle contains at most one empty vertex. + // Thus there are at most N+1 vertices. + for (size_t infin_loop_guard = vertex_mapping.size() + 1; + infin_loop_guard != 0; --infin_loop_guard) { + const auto v1 = m_abstract_cycles_vertices.at(current_id); + const auto citer = vertex_mapping.find(v1); + if (citer == vertex_mapping.cend()) { + // We end at an empty vertex. + endpoints.second = current_id; + return false; + } + if (citer->second == start_vertex) { + // We've hit the start. + endpoints.second = current_id; + return true; + } + current_id = m_abstract_cycles_vertices.insert_after(current_id); + m_abstract_cycles_vertices.at(current_id) = citer->second; + } + throw std::runtime_error( + "TrivialTSA::grow_cycle_forwards: " + "hit vertex count limit; invalid vertex mapping"); +} + +void TrivialTSA::grow_cycle_backwards(Endpoints& endpoints) { + auto current_id = endpoints.first; + + // In a valid cycle, every vertex but one (the empty vertex) + // is the target of something, and therefore there are <= N+1 vertices. + for (size_t infin_loop_guard = m_reversed_vertex_mapping.size() + 1; + infin_loop_guard != 0; --infin_loop_guard) { + const auto v1 = m_abstract_cycles_vertices.at(current_id); + const auto citer = m_reversed_vertex_mapping.find(v1); + if (citer == m_reversed_vertex_mapping.cend()) { + // Our vertex is not the target of anything. + // So, it's the START. + endpoints.first = current_id; + return; + } + // Remember the reverse order! + current_id = m_abstract_cycles_vertices.insert_before(current_id); + m_abstract_cycles_vertices.at(current_id) = citer->second; + } + throw std::runtime_error( + "TrivialTSA::grow_cycle_backwards: " + "hit vertex count limit; invalid vertex mapping"); +} + +void TrivialTSA::do_final_checks() const { + m_vertices_seen.clear(); + for (const auto& entry : m_reversed_vertex_mapping) { + m_vertices_seen.insert(entry.first); + m_vertices_seen.insert(entry.second); + } + TKET_ASSERT(m_vertices_seen.size() == m_abstract_cycles_vertices.size()); + + // Erase them again...! + for (const auto& endpoints : m_cycle_endpoints) { + for (auto id = endpoints.first;; + id = m_abstract_cycles_vertices.next(id).value()) { + TKET_ASSERT( + m_vertices_seen.erase(m_abstract_cycles_vertices.at(id)) == 1); + if (id == endpoints.second) { + break; + } + } + } + TKET_ASSERT(m_vertices_seen.empty()); +} + +void TrivialTSA::fill_disjoint_abstract_cycles( + const VertexMapping& vertex_mapping) { + m_vertices_seen.clear(); + m_abstract_cycles_vertices.clear(); + m_cycle_endpoints.clear(); + m_reversed_vertex_mapping = get_reversed_map(vertex_mapping); + Endpoints endpoints; + + // Get the disjoint abstract cycles. 
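+  // (Illustrative example, not taken from the code: the mapping
+  // {0->1, 1->2, 2->0, 3->4, 4->3} splits into the disjoint abstract cycles
+  // (0 1 2) and (3 4). Each cycle is grown forwards from the first unseen
+  // vertex, and grown backwards only if it terminates at an empty vertex.)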
+ for (const auto& entry : vertex_mapping) { + if (m_vertices_seen.count(entry.first) != 0) { + continue; + } + m_abstract_cycles_vertices.push_back(entry.first); + endpoints.first = m_abstract_cycles_vertices.back_id().value(); + if (!grow_cycle_forwards(vertex_mapping, endpoints)) { + grow_cycle_backwards(endpoints); + } + m_cycle_endpoints.push_back(endpoints); + + // Now, add the vertices to vertices seen... + for (auto id = endpoints.first;; + id = m_abstract_cycles_vertices.next(id).value()) { + TKET_ASSERT( + m_vertices_seen.insert(m_abstract_cycles_vertices.at(id)).second); + if (id == endpoints.second) { + break; + } + } + } +} + +void TrivialTSA::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& /*not needed*/, + PathFinderInterface& path_finder) { + append_partial_solution(swaps, vertex_mapping, distances, path_finder); +} + +void TrivialTSA::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, PathFinderInterface& path_finder) { + if (all_tokens_home(vertex_mapping)) { + return; + } + fill_disjoint_abstract_cycles(vertex_mapping); + do_final_checks(); + + if (m_options == Options::FULL_TSA) { + // OK, below, for a single cycle, we use CyclicShiftCostEstimate + // to estimate, not ONLY the cheapest single cycle, but ALSO + // the start vertex to enact it most cheaply. + // We could do that here also and it might save a bit, + // BUT the full Trivial TSA is really only used for testing now + // so don't bother. + append_partial_solution_with_all_cycles(swaps, vertex_mapping, path_finder); + return; + } + TKET_ASSERT(m_options == Options::BREAK_AFTER_PROGRESS); + // We're only going to do ONE cycle; so find which cycle + // has the shortest estimated number of swaps + size_t best_estimated_concrete_swaps = std::numeric_limits::max(); + Endpoints best_endpoints; + size_t start_v_index = std::numeric_limits::max(); + + for (const auto& endpoints : m_cycle_endpoints) { + copy_vertices_to_work_vector(endpoints); + if (m_vertices_work_vector.size() < 2) { + TKET_ASSERT(m_vertices_work_vector.size() == 1); + continue; + } + const CyclicShiftCostEstimate estimate(m_vertices_work_vector, distances); + TKET_ASSERT( + estimate.estimated_concrete_swaps < std::numeric_limits::max()); + TKET_ASSERT(estimate.start_v_index < m_vertices_work_vector.size()); + if (estimate.estimated_concrete_swaps < best_estimated_concrete_swaps) { + best_estimated_concrete_swaps = estimate.estimated_concrete_swaps; + start_v_index = estimate.start_v_index; + best_endpoints = endpoints; + } + } + TKET_ASSERT( + best_estimated_concrete_swaps < std::numeric_limits::max()); + const auto swap_size_before = swaps.size(); + const auto decrease = append_partial_solution_with_single_cycle( + best_endpoints, start_v_index, swaps, vertex_mapping, distances, + path_finder); + TKET_ASSERT(swap_size_before < swaps.size()); + TKET_ASSERT(decrease > 0); +} + +void TrivialTSA::copy_vertices_to_work_vector(const Endpoints& endpoints) { + m_vertices_work_vector.clear(); + for (auto id = endpoints.first;; + id = m_abstract_cycles_vertices.next(id).value()) { + m_vertices_work_vector.push_back(m_abstract_cycles_vertices.at(id)); + if (id == endpoints.second) { + break; + } + } +} + +void TrivialTSA::append_partial_solution_with_all_cycles( + SwapList& swaps, VertexMapping& vertex_mapping, + PathFinderInterface& path_finder) { + for (const auto& endpoints : m_cycle_endpoints) { + 
copy_vertices_to_work_vector(endpoints); + if (m_vertices_work_vector.size() < 2) { + continue; + } + // Break the abstract cycle into abstract swaps... + // To shift: [a,b,c,d] -> [d,a,b,c], we do abstract swaps in + // opposite order of the shift direction, i.e. cd bc ab + for (size_t ii = m_vertices_work_vector.size() - 1; ii > 0; --ii) { + // Abstract swap(v1, v2). + const auto v1 = m_vertices_work_vector[ii]; + const auto v2 = m_vertices_work_vector[ii - 1]; + TKET_ASSERT(v1 != v2); + const auto& path = path_finder(v1, v2); + TKET_ASSERT(path.size() >= 2); + append_swaps_to_interchange_path_ends(path, vertex_mapping, swaps); + } + } +} + +size_t TrivialTSA::append_partial_solution_with_single_cycle( + const Endpoints& endpoints, size_t start_v_index, SwapList& swaps, + VertexMapping& vertex_mapping, DistancesInterface& distances, + PathFinderInterface& path_finder) { + copy_vertices_to_work_vector(endpoints); + TKET_ASSERT(m_vertices_work_vector.size() >= 2); + TKET_ASSERT(start_v_index < m_vertices_work_vector.size()); + + // Can go negative! But MUST be >= 1 at the end + // (otherwise this cycle was useless and should never have occurred). + int current_L_decrease = 0; + + // To shift: [a,b,c,d] -> [d,a,b,c], we do abstract swaps in the opposite + // order to the shift direction, i.e. cd bc ab + for (size_t ii = m_vertices_work_vector.size() - 1; ii > 0; --ii) { + // Abstract swap(v1, v2). + const auto v1 = m_vertices_work_vector + [(ii + start_v_index) % m_vertices_work_vector.size()]; + + const auto v2 = m_vertices_work_vector + [((ii - 1) + start_v_index) % m_vertices_work_vector.size()]; + + TKET_ASSERT(v1 != v2); + const auto& path = path_finder(v1, v2); + TKET_ASSERT(path.size() >= 2); + + // e.g., to swap endpoints: [x,a,b,c,y] -> [y,a,b,c,x], + // do concrete swaps xa ab bc cy bc ab xa. + + // xa ab bc cy ...(ascending) + for (size_t jj = 1; jj < path.size(); ++jj) { + current_L_decrease += + get_swap_decrease(vertex_mapping, path[jj], path[jj - 1], distances); + + VertexSwapResult(path[jj], path[jj - 1], vertex_mapping, swaps); + if (current_L_decrease > 0) { + return static_cast(current_L_decrease); + } + } + // Now the reverse: bc ab xa + for (size_t kk = path.size() - 2; kk > 0; --kk) { + current_L_decrease += + get_swap_decrease(vertex_mapping, path[kk], path[kk - 1], distances); + + VertexSwapResult(path[kk], path[kk - 1], vertex_mapping, swaps); + if (current_L_decrease > 0) { + return static_cast(current_L_decrease); + } + } + } + // The cycle MUST have decreased L overall, + // otherwise we shouldn't have done it. + TKET_ASSERT(!"TrivialTSA::append_partial_solution_with_single_cycle"); + return 0; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TrivialTSA.hpp b/tket/src/TokenSwapping/TrivialTSA.hpp new file mode 100644 index 0000000000..2b054d1f43 --- /dev/null +++ b/tket/src/TokenSwapping/TrivialTSA.hpp @@ -0,0 +1,209 @@ +#ifndef _TKET_TokenSwapping_TrivialTSA_H_ +#define _TKET_TokenSwapping_TrivialTSA_H_ + +#include + +#include "PartialTsaInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** A full TSA, simple and fast but not giving very good solutions. + * This works by decomposing the desired mapping into abstract disjoint + * cycles, decomposing the abstract cycles into lists of abstract swaps, + * then finally decomposing the abstract swaps into concrete swaps. 
+ * ("Abstract" means that the vertices invloved are not necessarily + * adjacent, so the actual swaps cannot be calculated without knowing + * the graph, and "concrete" swaps are actual swaps beteen adjacent vertices). + * Because the ABSTRACT cycles are disjoint, we are free to perform them, + * as long as no other vertices are moved when doing so (they may be moved + * in intermediate steps, but will be moved back again by the end of each + * cycle). Thus we are guaranteed to get a full solution, + * although in tests it can easily give 20-30% more swaps than the best TSA. + */ +class TrivialTSA : public PartialTsaInterface { + public: + /** Extra options to control behaviour. */ + enum class Options { + /** Run the algorithm to completion. */ + FULL_TSA, + + /** Start running the calculated swaps, + * but terminate as soon as nonzero L decrease occurs + * (which thus gives a Partial TSA). + */ + BREAK_AFTER_PROGRESS + }; + + /** By default, it's a full TSA. + * @param options Option to set behaviour; by default, a full TSA. + */ + explicit TrivialTSA(Options options = Options::FULL_TSA); + + /** Set another option. + * @param options The option to be set from now on. + */ + void set(Options options); + + /** Calculate and append the complete solution (or break off early if + * BREAK_AFTER_PROGRESS was set). The point is that this partial TSA + * is not so good, but will be combined with other partial TSAs which + * are better, so we want to break off ASAP when progress occurs. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping, will be updated. + * @param distances An object to calculate distances between vertices. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& /*not_needed*/, + PathFinderInterface& path_finder) override; + + /** The same as the standard append_partial_solution interface, + * but without needing to pass in a NeighboursInterface. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping, will be updated. + * @param distances An object to calculate distances between vertices. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. + */ + void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, PathFinderInterface& path_finder); + + private: + // NOTE: the reason this is all a bit more complicated (and so, the word + // "trivial" is a bit unfair) is that we have to allow empty vertices. + // With full vertices (every vertex having a token), we can find cycles just + // by starting anywhere and going forwards until we hit the start again. + // But if some vertices can be empty, we may not be able to go forward + // once we hit an empty vertex, so we then have to go backwards also + // until we cannot anymore, and finally link the empty end with the nonempty + // start vertex to make a cycle. + // However, it's really just the same algorithm as the full tokens case. + + Options m_options; + + /** This will contain ALL relevant vertices for ALL cycles, but another + * object m_cycle_endpoints will store information about where + * each cycle starts and ends. 
+ */ + VectorListHybrid m_abstract_cycles_vertices; + mutable std::set m_vertices_seen; + + typedef VectorListHybrid::ID ID; + + /** For an abstract cycle: the first is the ID of the start vertex in + * "m_abstract_cycles_vertices" (which already has a builtin linked list + * structure), the second is the final vertex. + */ + typedef std::pair Endpoints; + + /** Information about where each cycle starts and ends, + * using the vertices in m_abstract_cycles_vertices. + */ + std::vector m_cycle_endpoints; + std::vector m_vertices_work_vector; + + /** Fills m_abstract_cycles_vertices, m_cycle_endpoints with the cycles. + * @param vertex_mapping The current desired mapping. + */ + void fill_disjoint_abstract_cycles(const VertexMapping& vertex_mapping); + + /** Taking the given first element of "endpoints" as the start vertex, + * already known to be in "vertex_mapping", follow the arrows forwards + * until no more arrows exist, OR it wraps around to the first vertex, + * adding the vertices to "m_abstract_cycles_vertices" as we go, + * and updating "endpoints". Does NOT change m_vertices_seen. + * @param vertex_mapping The current desired mapping. + * @param endpoints The IDs of the vertex endpoints of the desired new cycle + * (but only the first ID is valid at the start; the second ID will be + * updated). + * @return TRUE if a cycle is found, FALSE if it ends at an empty vertex. + */ + bool grow_cycle_forwards( + const VertexMapping& vertex_mapping, Endpoints& endpoints); + + /** To be called immediately after grow_cycle_forwards, + * if the end vertex did NOT wrap around to the start vertex. + * So, go backwards from the start vertex until we cannot any more. + * (We can't hit the end vertex since it's empty, + * so no arrow can come from there). + * Update endpoints.first. + * Does NOT change m_vertices_seen. Uses m_reversed_vertex_mapping. + * @param endpoints The IDs of the partial vertex cycle start and end + * vertices, to be updated (the end of the cycle must wrap round + * to the start; the start is not yet determined). + */ + void grow_cycle_backwards(Endpoints& endpoints); + + /** The ordinary vertex mapping is from v1 to v2, + * where v2 is the target of the token currently at v1. + * For this mapping, the key is v2, the value is v1. + */ + VertexMapping m_reversed_vertex_mapping; + + /** Checks validity/consistency of the data in m_abstract_cycles_vertices, + * m_cycle_endpoints, m_reversed_vertex_mapping and throws if invalid. + */ + void do_final_checks() const; + + /** Gets the vertices stored in order in m_abstract_cycles_vertices, + * given by the Endpoints, and copies them to m_vertices_work_vector. + * (Necessary because we need to do random access, which VectorListHybrid + * does not have). + * @param endpoints The IDs of the complete vertex cycle start and end, + * listed in order in m_abstract_cycles_vertices. + */ + void copy_vertices_to_work_vector(const Endpoints& endpoints); + + /** Once m_abstract_cycles_vertices and m_cycle_endpoints have been filled, + * append the complete solution. + * (We don't need to find distances any more, we need actual paths). + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping, will be updated. + * @param path_finder The object to calculate a shortest path between any + * pair of vertices. 
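+   *
+   * (Worked example, mirroring the comments in TrivialTSA.cpp: the abstract
+   * cycle [a,b,c,d] is enacted as the abstract swaps (c,d), (b,c), (a,b), in
+   * the opposite order to the shift direction; each abstract swap (v1,v2) is
+   * then realised as concrete swaps along a shortest path from v1 to v2.)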
+ */ + void append_partial_solution_with_all_cycles( + SwapList& swaps, VertexMapping& vertex_mapping, + PathFinderInterface& path_finder); + + /** Perform the single abstract cycle, but breaking off as soon as + * the overall total home distance (L) decreases. + * (Every abstract cycle has strictly positive L-decrease, otherwise + * it wouldn't be included at all, so doing the whole thing must decrease L. + * But if we're lucky, we'll decrease L earlier). + * + * Note that we ALSO have to do some estimation, not only to choose + * which cycle is likely to be cheap, but ALSO to decide where to + * start from. (An ABSTRACT cycle [v0, v1, v2, ..., vn] is decomposed into + * ABSTRACT swaps (v0, v1).(v1,v2). ... .(v(n-1), vn), which omits the + * abstract swap (vn,v0), but we could have chosen any other v(i) to be + * the start vertex. Unlike for CONCRETE swaps, abstract swaps have + * different costs, so it's important to choose well). + * + * @param endpoints The IDs of the ends of the final cycle + * we've decided to use. + * @param start_v_index The starting index in the final cycle vertices, + * treating it logically as a vector. (The indices wrap round and reduce + * modulo the size). + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping, will be updated. + * @param distances An object to calculate distances between vertices. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. + * @return the actual L-decrease (will be strictly positive). + */ + size_t append_partial_solution_with_single_cycle( + const Endpoints& endpoints, size_t start_v_index, + // L (the sum of the distances to home) must decrease + // by at least this amount, to break off early. + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, PathFinderInterface& path_finder); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/VectorListHybrid.hpp new file mode 100644 index 0000000000..0e88ecec21 --- /dev/null +++ b/tket/src/TokenSwapping/VectorListHybrid.hpp @@ -0,0 +1,522 @@ +#ifndef _TKET_TokenSwapping_VectorListHybrid_H_ +#define _TKET_TokenSwapping_VectorListHybrid_H_ + +#include +#include + +#include "Utils/Assert.hpp" +#include "VectorListHybridSkeleton.hpp" + +namespace tket { +namespace tsa_internal { + +struct OverwriteIntervalResult { + size_t number_of_overwritten_elements; + VectorListHybridSkeleton::Index final_overwritten_element_id; +}; + +/** VectorListHybrid combines some functionality of std::vector + * and std::list, with the following goals: + * + * Objects are stored internally inside a std::vector. + * + * UNLIKE STL linked lists: erasure/insertion does NOT cause dynamic + * memory allocation/deallocation (except when more space + * is needed, in which case a vector reallocation takes place). + * + * All operations are O(1), except insertions which are amortised O(1) + * (because a vector reallocation may be needed for more storage space). + * + * Objects are not actually destroyed, they are merely marked for later reuse. + * Thus this class is good when objects are expensive to construct, + * but cheap to reuse and clear, and will be reused many times. + * (E.g., imagine a std::vector> being repeatedly resized; + * all those inner std::vector are repeatedly deallocated and reallocated). + * + * Objects can be accessed at any position, via an ID (like a vector index). 
+ * + * Erasure/insertion does NOT invalidate other IDs, unless that element + * was erased (or the whole container cleared). + * + * NOTE: "previous" and "next" directions, by analogy with std::vector, + * correspond to the logical order the elements are regarded to have, + * AS IF they sat in a vector which we iterated through in forwards + * order (which, of course, is unrelated to where they are actually stored + * internally). Thus, "next", "forward" moves go towards the BACK; "previous", + * "backward" moves go towards the FRONT. This should not confuse if we remember + * std::vector itself, with begin() and rbegin() iterators. + * + * TODO: there are no O(log N) operations, and no checks for invalid indices. + * This could be achieved by wrapping this class and storing + * sets/maps of erased/inserted IDs, etc. etc. Then everything would become + * O(log N) or amortised O(log N) instead of O(1), but we'd also have + * complete checks. + * + * TODO: this class should have its own tests. Right now it is only used + * in other things (SwapListOptimiser) which do have end-to-end tests, + * so it's quite reliable but not as reliable as it could be. + * + * TODO: Once this is well tested, move it to Utils for wider use. + */ +template +class VectorListHybrid { + public: + /** NOTE: the ID is NOT necessarily an actual vector index; + * that's an implementation detail. + */ + typedef VectorListHybridSkeleton::Index ID; + + VectorListHybrid(); + + /** Returns an ID which is guaranteed NEVER to be valid. + * @return an ID value guaranteed NEVER to be valid. + */ + static ID get_invalid_id(); + + /** Logical clear: doesn't actually delete the elements, + * just relabels them for reuse. Time O(N). + * After this, all data - even IDs - will behave AS IF + * it were a new object. + */ + void clear(); + + /** Logical clear: doesn't actually delete the elements, + * just relabels them for reuse. Time O(1). + * After calling this function, IDs related to + * inserting/erasing elements may be different from + * those which would be obtained by the same sequence + * of operations on a new object. + */ + void fast_clear(); + + /** Like std::reverse, reverses the (logical) order of the elements. (Not the + * physical order: the internal vector of T objects is unchanged, only the + * links are changed). Existing ids may be invalidated. Time O(n). + */ + void reverse(); + + bool empty() const; + + /** The number of valid elements stored (not, of course, the actual + * internal number of elements, which is larger if some are waiting + * to be reused). + * @return The number of active elements stored. + */ + size_t size() const; + + /** Exactly like std::vector push_back. Fine if T is lightweight. + * Otherwise, maybe better to reuse elements. + * @param elem The T object to be copied and stored. + */ + void push_back(const T& elem); + + /** Like push_back, creates a new element after the current back, + * but returns the ID for the new element (which of course might not + * really be new; it is for reuse - it may be an old T object). + * Of course the returned ID is the same as would be obtained + * from back_id(). + * @return The ID of the newly created (or reused) element. + */ + ID emplace_back(); + + /** Erase the element at the back, but no checks for validity. */ + void pop_back(); + + /** Like push_back, but instead inserts the new element before + * the existing front element (so that it becomes the new front). + * @param elem The T object to be copied and stored. 
+ */ + void push_front(const T& elem); + + /** Like emplace_back(), but creates the new element at the front, + * like push_front. However, returns the ID of the new object + * at the front. + * @return The ID of the newly created (or reused) element, at the front. + */ + ID emplace_front(); + + /** Erase the element at the front, but no checks for validity. */ + void pop_front(); + + /** Creates a new element after the existing one (not checked). + * @param id The ID of an existing element. + * @return The ID of the new element, inserted immediately after + * (i.e., "next"; towards the BACK) of the given element. + */ + ID insert_after(ID id); + + /** Creates a new element before the existing one (not checked). + * @param id The ID of an existing element. + * @return The ID of the new element, inserted immediately before + * (i.e., "previous"; towards the FRONT) of the given element. + */ + ID insert_before(ID id); + + /** Just like std::vector back(). + * Retrieve the element for reuse; must exist! + * @return A reference to the existing element at the back. + */ + T& back(); + + /** Retrieve the element for reuse; must exist! + * @return A reference to the existing element at the front. + */ + T& front(); + + /** Retrieve the stored element at the existing ID (not checked!) + * @param id The ID of an existing element. + * @return A reference to the element. + */ + T& at(ID id); + + /** Retrieve the stored element at the existing ID (not checked!) + * @param id The ID of an existing element. + * @return A reference to the element. + */ + const T& at(ID) const; + + /** Get the element ID after the given one (which MUST be valid), + * or a null ID if we're already at the back. + * @param id The ID of an existing element. + * @return The ID of the element after it (towards the BACK), + * or null if it doesn't exist. + */ + std::optional next(ID id) const; + + /** Get the ID of the element after the given one. + * @param id The ID of an existing element, OR null if none exists. + * @return The ID of the element after it (towards the BACK), + * OR null if it doesn't exist, or no ID was specified. + */ + std::optional next(std::optional id) const; + + /** Like next. Get the element ID before the given one (which MUST be valid), + * or a null ID if we're already at the front. + * @param id The ID of an existing element. + * @return The ID of the element before it (towards the FRONT), + * or null if it doesn't exist. + */ + std::optional previous(ID id) const; + + /** The ID of the back() element, if it exists. + * @return The ID of the element at back(), or null if there is none. + */ + std::optional back_id() const; + + /** The ID of the front() element, if it exists. + * @return The ID of the element at front(), or null if there is none. + */ + std::optional front_id() const; + + /** Erase the element with that ID, whilst updating other links + * (the ID must actually exist). + * @param id The ID of the existing element to erase. + */ + void erase(ID id); + + /** Starting with the given ID, erase the given number of elements. + * Equivalent to looping with erase() and next(), but more efficient. + * The list MUST contain enough elements to erase. + * @param id The ID of the initial existing element to erase. Must be valid. + * @param number_of_elements The number of elements to erase. The list MUST + * contain enough elements to be erased. 
+ */ + void erase_interval(ID id, size_t number_of_elements); + + /** Starting with the given ID, and given cbegin, cend iterators to a + * container of T objects, overwrite whatever T objects are currently stored + * in the list with the new T objects. The list MUST be big enough to allow + * overwriting all of them. The container of T objects MUST be nonempty. + * @param id The ID of the initial existing T element to overwrite. Must be + * valid. + * @param new_elements_cbegin Const iterator to the start of a sequence of new + * T elements. + * @param new_elements_cend Const iterator to the cend of a sequence of new T + * elements. + * @return The ID of the last T element that was overwritten; MUST be valid! + */ + template + OverwriteIntervalResult overwrite_interval( + ID id, const CIter& new_elements_cbegin, const CIter& new_elements_cend); + + /** Returns an ordinary vector of the data (in the correct order, + * maybe not the same as the internal storage order of course). + * @return A copy of the valid T objects stored, in the correct LOGICAL + * order, AS IF they had been inserted into a vector object throughout. + * (Of course, probably not the same as the actual storage order). + */ + std::vector to_vector() const; + + /** Doesn't clear the vector, but copies all elements to the end of it. + * @param vect A vector, which will have all the valid elements in this + * object pushed back to it. + */ + void append_to_vector(std::vector& vect) const; + + /** Only for debugging purposes. + * @return A string giving further details of the internal data. + */ + std::string debug_str() const; + + private: + VectorListHybridSkeleton m_links_data; + + /// The actual stored elements. + std::vector m_data; + + /** Returns the ID if valid, or null if not. + * @param id An ID, maybe invalid. + * @return The ID again, if valid, or null if not. + */ + static std::optional optional_id(ID id); + + /** Checks if m_data is big enough for the ID (which is really an index, + * returned by m_links_data). If not, resizes m_data if necessary, + * and just returns the ID unchanged. + * @param id An ID, valid for m_links_data, but maybe not for m_data. + * @return The passed in ID, but now definitely valid. + */ + ID get_checked_new_id(ID id); + + /** The list must currently be empty (but not checked). Creates a new + * element, resizes m_data if necessary, and returns the ID. + * @return The ID of the newly created (or reused) element. 
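// A minimal usage sketch of the container documented above (illustration only;
// the element type int, the demo function and the variable names are not part
// of this patch). It shows the main point: IDs handed out on insertion stay
// valid across later insertions and erasures of OTHER elements.
#include <vector>
#include "TokenSwapping/VectorListHybrid.hpp"  // path as added by this patch

void vector_list_hybrid_demo() {
  tket::tsa_internal::VectorListHybrid<int> list;
  list.push_back(10);
  list.push_back(30);
  const auto id_of_10 = list.front_id().value();
  // Insert 20 between 10 and 30; the IDs of 10 and 30 are unaffected.
  const auto id_of_20 = list.insert_after(id_of_10);
  list.at(id_of_20) = 20;
  // Logical order is now 10, 20, 30 (the internal storage order is a detail).
  const std::vector<int> contents = list.to_vector();
  (void)contents;
  // Erase the middle element: remaining IDs stay valid, and the freed slot is
  // reused by a later insertion instead of being deallocated.
  list.erase(id_of_20);
  // Forward traversal using the optional-ID idiom, as used elsewhere in this patch.
  for (auto id_opt = list.front_id(); id_opt;
       id_opt = list.next(id_opt.value())) {
    (void)list.at(id_opt.value());
  }
}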
+ */ + ID insert_for_empty_list(); +}; + +template +VectorListHybrid::VectorListHybrid() {} + +template +typename VectorListHybrid::ID VectorListHybrid::get_invalid_id() { + return VectorListHybridSkeleton::get_invalid_index(); +} + +template +std::optional::ID> +VectorListHybrid::optional_id(ID id) { + if (id == VectorListHybridSkeleton::get_invalid_index()) { + return {}; + } + return id; +} + +template +void VectorListHybrid::clear() { + m_links_data.clear(); +} + +template +void VectorListHybrid::fast_clear() { + m_links_data.fast_clear(); +} + +template +void VectorListHybrid::reverse() { + m_links_data.reverse(); +} +template +bool VectorListHybrid::empty() const { + return m_links_data.size() == 0; +} + +template +size_t VectorListHybrid::size() const { + return m_links_data.size(); +} + +template +void VectorListHybrid::push_back(const T& elem) { + emplace_back(); + back() = elem; +} + +template +typename VectorListHybrid::ID VectorListHybrid::emplace_back() { + if (empty()) { + insert_for_empty_list(); + } else { + insert_after(m_links_data.back_index()); + } + return m_links_data.back_index(); +} + +template +void VectorListHybrid::pop_back() { + erase(m_links_data.back_index()); +} + +template +void VectorListHybrid::push_front(const T& elem) { + emplace_front(); + front() = elem; +} + +template +typename VectorListHybrid::ID VectorListHybrid::emplace_front() { + if (empty()) { + insert_for_empty_list(); + } else { + insert_before(m_links_data.front_index()); + } + return m_links_data.front_index(); +} + +template +void VectorListHybrid::pop_front() { + erase(m_links_data.front_index()); +} + +template +typename VectorListHybrid::ID VectorListHybrid::insert_for_empty_list() { + m_links_data.insert_for_empty_list(); + return get_checked_new_id(m_links_data.front_index()); +} + +template +typename VectorListHybrid::ID VectorListHybrid::insert_after( + VectorListHybrid::ID id) { + m_links_data.insert_after(id); + return get_checked_new_id(m_links_data.next(id)); +} + +template +typename VectorListHybrid::ID VectorListHybrid::insert_before( + VectorListHybrid::ID id) { + m_links_data.insert_before(id); + return get_checked_new_id(m_links_data.previous(id)); +} + +template +T& VectorListHybrid::back() { + return m_data[m_links_data.back_index()]; +} + +template +T& VectorListHybrid::front() { + return m_data[m_links_data.front_index()]; +} + +template +T& VectorListHybrid::at(ID id) { + return m_data[id]; +} + +template +const T& VectorListHybrid::at(ID id) const { + return m_data[id]; +} + +template +std::optional::ID> VectorListHybrid::next( + ID id) const { + const ID index = m_links_data.next(id); + return optional_id(index); +} + +template +std::optional::ID> VectorListHybrid::next( + std::optional id) const { + return next(id.value()); +} + +template +std::optional::ID> VectorListHybrid::previous( + ID id) const { + return optional_id(m_links_data.previous(id)); +} + +template +std::optional::ID> VectorListHybrid::back_id() + const { + return optional_id(m_links_data.back_index()); +} + +template +std::optional::ID> VectorListHybrid::front_id() + const { + return optional_id(m_links_data.front_index()); +} + +template +void VectorListHybrid::erase(ID id) { + m_links_data.erase(id); +} + +template +void VectorListHybrid::erase_interval( + typename VectorListHybrid::ID id, size_t number_of_elements) { + m_links_data.erase_interval(id, number_of_elements); +} + +template +template +OverwriteIntervalResult VectorListHybrid::overwrite_interval( + typename VectorListHybrid::ID id, 
const CIter& new_elements_cbegin, + const CIter& new_elements_cend) { + // The links are unchanged; only the elements need to be changed. + OverwriteIntervalResult result; + result.final_overwritten_element_id = id; + CIter citer = new_elements_cbegin; + TKET_ASSERT(citer != new_elements_cend); + const auto max_number_of_elements = m_links_data.size(); + result.number_of_overwritten_elements = 0; + for (;;) { + m_data.at(result.final_overwritten_element_id) = *citer; + ++result.number_of_overwritten_elements; + TKET_ASSERT( + result.number_of_overwritten_elements <= max_number_of_elements); + ++citer; + if (citer == new_elements_cend) { + return result; + } + // There IS another element, where will it be overwritten? + result.final_overwritten_element_id = + m_links_data.next(result.final_overwritten_element_id); + } + // Should be impossible to reach here + TKET_ASSERT(!"VectorListHybrid::overwrite_interval"); + return result; +} + +template +void VectorListHybrid::append_to_vector(std::vector& vect) const { + vect.reserve(vect.size() + size()); + for (ID current_index = m_links_data.front_index(); + current_index != m_links_data.get_invalid_index(); + current_index = m_links_data.next(current_index)) { + vect.emplace_back(m_data[current_index]); + } +} + +template +std::vector VectorListHybrid::to_vector() const { + std::vector result; + append_to_vector(result); + return result; +} + +template +typename VectorListHybrid::ID VectorListHybrid::get_checked_new_id( + ID id) { + if (m_data.size() <= id) { + m_data.resize(id + 1); + } + return id; +} + +template +std::string VectorListHybrid::debug_str() const { + std::stringstream ss; + ss << "\nRaw stored elems:"; + for (size_t nn = 0; nn < m_data.size(); ++nn) { + ss << "\nData[" << nn << "] = " << m_data[nn]; + } + ss << "\n" << m_links_data.debug_str() << "\n"; + return ss.str(); +} + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp new file mode 100644 index 0000000000..05c832848e --- /dev/null +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -0,0 +1,295 @@ +#include "VectorListHybridSkeleton.hpp" + +#include +#include +#include + +#include "Utils/Assert.hpp" + +namespace tket { +namespace tsa_internal { + +using Index = VectorListHybridSkeleton::Index; + +const Index INVALID_INDEX = std::numeric_limits::max(); + +Index VectorListHybridSkeleton::get_invalid_index() { return INVALID_INDEX; } + +VectorListHybridSkeleton::VectorListHybridSkeleton() + : m_size(0), + m_front(INVALID_INDEX), + m_back(INVALID_INDEX), + m_deleted_front(INVALID_INDEX) {} + +void VectorListHybridSkeleton::clear() { + if (m_links.empty()) { + TKET_ASSERT(m_size == 0); + TKET_ASSERT(m_front == INVALID_INDEX); + TKET_ASSERT(m_back == INVALID_INDEX); + TKET_ASSERT(m_deleted_front == INVALID_INDEX); + return; + } + m_size = 0; + m_front = INVALID_INDEX; + m_back = INVALID_INDEX; + for (Index nn = 1; nn < m_links.size(); ++nn) { + // Not strictly necessary, as deleted links are only a forward list; + // but make absolutely sure no leakage of prior internal link data can + // occur. + m_links[nn].previous = nn - 1; + m_links[nn - 1].next = nn; + } + m_links[0].previous = INVALID_INDEX; + m_links.back().next = INVALID_INDEX; + m_deleted_front = 0; +} + +void VectorListHybridSkeleton::fast_clear() { + if (m_back == INVALID_INDEX) { + // No elements stored currently; nothing to do. 
+ TKET_ASSERT(m_size == 0); + TKET_ASSERT(m_front == INVALID_INDEX); + return; + } + TKET_ASSERT(m_size > 0); + TKET_ASSERT(m_front != INVALID_INDEX); + TKET_ASSERT(m_links[m_back].next == INVALID_INDEX); + // There are some existing elements. + // Recall that deleted elements are ONLY a forward list, + // so we don't need to update "previous". + // To combine existing active elements with + // existing deleted elements, + // the valid elements will be joined to + // the start of the deleted list. + if (m_deleted_front != INVALID_INDEX) { + m_links[m_back].next = m_deleted_front; + } + // Convert "active" elements into deleted elements. + m_deleted_front = m_front; + m_front = INVALID_INDEX; + m_back = INVALID_INDEX; + m_size = 0; +} + +void VectorListHybridSkeleton::reverse() { + if (m_size <= 1) { + // Nothing to do. + return; + } + TKET_ASSERT(m_front != INVALID_INDEX); + TKET_ASSERT(m_back != INVALID_INDEX); + TKET_ASSERT(m_front != m_back); + // The deleted element links don't need to change. + { + auto current_index = m_front; + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + m_links.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + auto& link = m_links[current_index]; + const auto next_index = link.next; + std::swap(link.next, link.previous); + if (next_index >= m_links.size()) { + TKET_ASSERT(next_index == INVALID_INDEX); + terminated_correctly = true; + break; + } + current_index = next_index; + } + TKET_ASSERT(terminated_correctly); + } + std::swap(m_front, m_back); +} + +size_t VectorListHybridSkeleton::size() const { return m_size; } + +Index VectorListHybridSkeleton::front_index() const { return m_front; } + +Index VectorListHybridSkeleton::back_index() const { return m_back; } + +Index VectorListHybridSkeleton::next(Index index) const { + return m_links[index].next; +} + +Index VectorListHybridSkeleton::previous(Index index) const { + return m_links[index].previous; +} + +void VectorListHybridSkeleton::erase(Index index) { + --m_size; + auto& current_link = m_links[index]; + if (current_link.previous == INVALID_INDEX) { + // We're erasing the front. + m_front = current_link.next; + } else { + m_links[current_link.previous].next = current_link.next; + } + if (current_link.next == INVALID_INDEX) { + // We're erasing the back. + m_back = current_link.previous; + } else { + m_links[current_link.next].previous = current_link.previous; + } + // Recall: deleted elements are a forward list ONLY. + current_link.next = m_deleted_front; + m_deleted_front = index; +} + +void VectorListHybridSkeleton::erase_interval( + Index index, size_t number_of_elements) { + if (number_of_elements == 0) { + return; + } + // First, find the index of the LAST element to be erased. + // Notice that this is the only O(N) part; the rest are O(1). + // We update only O(1) links in total, not O(N), + // so slightly faster than a loop of next/erase calls. 
+ Index last_element_index = index; + for (size_t nn = 1; nn < number_of_elements; ++nn) { + last_element_index = m_links.at(last_element_index).next; + if (last_element_index >= m_links.size()) { + std::stringstream ss; + ss << "VectorListHybridSkeleton::erase_interval with start index " + << index << ", number_of_elements=" << number_of_elements << ", size " + << m_links.size() << ", run out of elements at N=" << nn + << " (got index " << last_element_index << ")"; + throw std::runtime_error(ss.str()); + } + } + TKET_ASSERT(number_of_elements <= m_size); + m_size -= number_of_elements; + + // Now, splice the soon-to-be-logically-erased interval into the deleted + // elements. Start the new deleted list at the erased interval. + const auto index_of_node_after_interval = m_links[last_element_index].next; + + // Correct whether or not m_deleted_front equals INVALID_INDEX. + m_links[last_element_index].next = m_deleted_front; + // No need to update previous, since the deleted nodes are only a forward + // list. + m_deleted_front = index; + + // Link the node BEFORE the interval to the new next node. + const auto index_of_node_before_interval = m_links[index].previous; + + if (index_of_node_before_interval < m_links.size()) { + // There IS a previous node to be dealt with. + auto& next_node_index_ref = m_links[index_of_node_before_interval].next; + TKET_ASSERT(next_node_index_ref == index); + // This is correct even if index_of_node_after_interval is INVALID_INDEX. + next_node_index_ref = index_of_node_after_interval; + TKET_ASSERT(m_front != index); + } else { + // No previous node, we must have been at the start already. + TKET_ASSERT(index_of_node_before_interval == INVALID_INDEX); + TKET_ASSERT(m_front == index); + m_front = index_of_node_after_interval; + } + // Link the node AFTER the interval to the new previous node. + if (index_of_node_after_interval < m_links.size()) { + // There are more unerased elements after the interval, + // so the first one must be dealt with. + auto& prev_node_index = m_links[index_of_node_after_interval].previous; + TKET_ASSERT(prev_node_index == last_element_index); + // Correct even if there IS no node before the interval. + prev_node_index = index_of_node_before_interval; + TKET_ASSERT(m_back != last_element_index); + } else { + // No node after, we have erased up to the back. + TKET_ASSERT(index_of_node_after_interval == INVALID_INDEX); + TKET_ASSERT(m_back == last_element_index); + m_back = index_of_node_before_interval; + } + if (m_size == 0) { + TKET_ASSERT(m_front == INVALID_INDEX); + TKET_ASSERT(m_back == INVALID_INDEX); + } else { + TKET_ASSERT(m_front < m_links.size()); + TKET_ASSERT(m_back < m_links.size()); + if (m_size == 1) { + TKET_ASSERT(m_front == m_back); + } + } +} + +void VectorListHybridSkeleton::insert_for_empty_list() { + const auto new_index = get_new_index(); + m_front = new_index; + m_back = new_index; + m_links[new_index].next = INVALID_INDEX; + m_links[new_index].previous = INVALID_INDEX; +} + +void VectorListHybridSkeleton::insert_after(Index index) { + const auto new_index = get_new_index(); + const auto old_next = m_links[index].next; + m_links[index].next = new_index; + m_links[new_index].next = old_next; + m_links[new_index].previous = index; + if (old_next == INVALID_INDEX) { + // The old element was already at the back. 
+ m_back = new_index; + } else { + m_links[old_next].previous = new_index; + } +} + +void VectorListHybridSkeleton::insert_before(Index index) { + const auto new_index = get_new_index(); + const auto old_prev = m_links[index].previous; + m_links[index].previous = new_index; + m_links[new_index].next = index; + m_links[new_index].previous = old_prev; + if (old_prev == INVALID_INDEX) { + // The old element was already at the front. + m_front = new_index; + } else { + m_links[old_prev].next = new_index; + } +} + +Index VectorListHybridSkeleton::get_new_index() { + ++m_size; + if (m_deleted_front == INVALID_INDEX) { + // We need to create a new element, it's full. + m_links.emplace_back(); + return m_links.size() - 1; + } + // Reuse a deleted element. + const auto old_deleted_front = m_deleted_front; + m_deleted_front = m_links[old_deleted_front].next; + return old_deleted_front; +} + +std::string VectorListHybridSkeleton::debug_str() const { + std::stringstream ss; + const auto to_str = [](size_t ii) -> std::string { + if (ii == INVALID_INDEX) { + return "NULL"; + } + return std::to_string(ii); + }; + + ss << "VLHS: size " << m_size << ", front " << to_str(m_front) << " back " + << to_str(m_back) << ", del.front " << to_str(m_deleted_front); + + ss << "\nActive links: forward ["; + for (auto index = m_front; index != INVALID_INDEX; + index = m_links[index].next) { + ss << index << "->"; + } + ss << "]\nBackward ("; + for (auto index = m_back; index != INVALID_INDEX; + index = m_links[index].previous) { + ss << index << "->"; + } + ss << ")\nDel.links: {"; + for (auto index = m_deleted_front; index != INVALID_INDEX; + index = m_links[index].next) { + ss << index << "->"; + } + ss << "}"; + return ss.str(); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp new file mode 100644 index 0000000000..1883a09202 --- /dev/null +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp @@ -0,0 +1,153 @@ +#ifndef _TKET_TokenSwapping_VectorListHybridSkeleton_H_ +#define _TKET_TokenSwapping_VectorListHybridSkeleton_H_ + +#include +#include +#include + +namespace tket { +namespace tsa_internal { + +/** This contains only support data and algorithms for VectorListHybrid, + * a data structure combining features of std::vector and linked lists. + * No checks for invalidated or never valid indices. + * This keeps track of indices for a std::vector of data, + * without actually holding any other data itself. + * Throughout, "after", "before", "next", "previous", "front", "back" + * refer to the logical ordering, AS IF elements were being inserted into + * and erased from a std::vector, but NOT the actual order in which elements + * are stored in an actual implementation (like VectorListHybrid). + * Erased elements are not actually erased, they are reused. + */ +class VectorListHybridSkeleton { + public: + /** Represents actual indices for a std::vector, which SHOULD store + * the objects we care about (templated on the object type; but this + * class stores no data except indexing information). + */ + typedef size_t Index; + + VectorListHybridSkeleton(); + + /** "Null" indices will always be represented by this value. + * @return An index value which is guaranteed NEVER to be valid. + */ + static Index get_invalid_index(); + + /** Indices will be valid until that element is erased, + * or clear() is called, regardless of other insertions/erasures. 
+ * A "logical" clear; does not actually clear any data, + * "erased" elements will be reused. + * But, this is time O(n) because existing internal links will be + * reset to default values. + */ + void clear(); + + /** Time O(1), does not erase internal link indices. Identical erase/insert + * calls after fast_clear() calls (i.e., respecting the ordering, but + * ignoring the internal indices) will result in the same logical list, + * BUT the returned Index values may be different. + */ + void fast_clear(); + + /** Reverses the logical order of the elements. Time O(n). */ + void reverse(); + + /** The number of elements currently stored; + * NOT equal to the underlying vector size! + * @return The number of valid elements stored. + */ + size_t size() const; + + /** The index of the front element (or the same index as returned by + * get_invalid_index() if currently empty). + * @return The index of the front element. + */ + Index front_index() const; + + /** The index of the back element (or the same index as returned by + * get_invalid_index() if currently empty). + * @return The index of the back element. + */ + Index back_index() const; + + // All input indices MUST be currently valid, + // but this is not checked. (Checking would need O(log N) time, + // since we'd have to use maps and sets). + + /** The index of the next element after the given one. + * @param index The index of a valid element (not checked). + * @return The index of the next element (or the same index as returned by + * get_invalid_index() if no next element exists). + */ + Index next(Index index) const; + + /** The index of the previous element before the given one. + * @param index The index of a valid element (not checked). + * @return The index of the previous element (or the same index as returned + * by get_invalid_index() if no previous element exists). + */ + Index previous(Index index) const; + + /** "Logical" erase of the element (the position is marked for reuse). + * @param index The index of a valid element (not checked). + */ + void erase(Index index); + + /** Logical erase of an interval of linked elements (a, next(a), + * next(next(a)), ...). Equivalent to looping with erase() and next(), but + * more efficient. The list MUST contain enough elements to erase. + * @param index The index of a valid element to start erasing at (not + * checked). + * @param number_of_elements Number of elements to erase; these MUST exist + * (the list must be big enough). + */ + void erase_interval(Index index, size_t number_of_elements); + + /** The list must currently be empty, but not checked. */ + void insert_for_empty_list(); + + /** Insert a new element after the existing one. + * @param index The index of a valid element (not checked). + */ + void insert_after(Index index); + + /** Insert a new element before the existing one. + * @param index The index of a valid element (not checked). + */ + void insert_before(Index index); + + /** A platform-independent string which can be copied into tests. + * @return A string representing the current data, useful for testing. + */ + std::string debug_str() const; + + private: + struct Link { + Index previous; + Index next; + }; + + std::vector m_links; + size_t m_size; + Index m_front; + Index m_back; + + // Deleted elements will form a second linked list for reuse + // inside the data. TRICK: forward list only, + // no need for doubly linked lists. 
+ Index m_deleted_front; + + /** Resizes m_links if necessary to ensure that the new index + * is valid (but will reuse erased elements if possible). + * However, DOESN'T set the "previous" and "next" data; + * the caller must do that (depending on what they're doing. + * Thus, it's initially an "orphan" link). + * @return A valid index for a new Link object (but with unset fields). + */ + Index get_new_index(); +}; + +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp new file mode 100644 index 0000000000..9b075e26c4 --- /dev/null +++ b/tket/src/TokenSwapping/main_entry_functions.cpp @@ -0,0 +1,129 @@ +#include "main_entry_functions.hpp" + +#include +#include + +#include "BestFullTsa.hpp" +#include "TSAUtils/VertexMappingFunctions.hpp" +#include "Utils/Assert.hpp" + +namespace tket { + +using namespace tsa_internal; + +std::vector> get_swaps( + const Architecture& architecture, const NodeMapping& node_mapping) { + std::vector> swaps; + // Before all the conversion and object construction, + // doesn't take long to check if it's actually trivial + bool trivial = true; + for (const auto& entry : node_mapping) { + if (entry.first != entry.second) { + trivial = false; + break; + } + } + if (trivial) { + return swaps; + } + // Now convert the Nodes into raw vertices for use in TSA objects. + const ArchitectureMapping arch_mapping(architecture); + VertexMapping vertex_mapping; + for (const auto& node_entry : node_mapping) { + vertex_mapping[arch_mapping.get_vertex(node_entry.first)] = + arch_mapping.get_vertex(node_entry.second); + } + TKET_ASSERT(vertex_mapping.size() == node_mapping.size()); + check_mapping(vertex_mapping); + + SwapList raw_swap_list; + BestFullTsa().append_partial_solution( + raw_swap_list, vertex_mapping, arch_mapping); + + // Finally, convert the raw swaps back to nodes. + swaps.reserve(raw_swap_list.size()); + for (auto id_opt = raw_swap_list.front_id(); id_opt; + id_opt = raw_swap_list.next(id_opt.value())) { + const auto& raw_swap = raw_swap_list.at(id_opt.value()); + swaps.emplace_back(std::make_pair( + arch_mapping.get_node(raw_swap.first), + arch_mapping.get_node(raw_swap.second))); + } + return swaps; +} + +std::tuple get_swaps( + const Architecture& architecture, + const unit_map_t& initial_logical_to_physical_map, + const unit_map_t& desired_logical_to_physical_map) { + // The physical qubits are nodes inside the architecture. + // Some Node <--> UnitID conversion is unavoidable with the current design, + // since Architecture uses Node objects, rather than UnitID objects, + // and types like vector and vector cannot be converted + // to each other without copying, even though each Node is just + // a UnitID with no extra data (C++ containers are not "covariant"). 
+ NodeMapping node_mapping; + for (const std::pair& initial_entry : + initial_logical_to_physical_map) { + const auto citer = + desired_logical_to_physical_map.find(initial_entry.first); + if (citer == desired_logical_to_physical_map.cend()) { + std::stringstream ss; + ss << "Logical qubit " << initial_entry.first.repr() + << " is present in the initial logical->physical map, but not in the " + "target logical->physical map"; + throw std::runtime_error(ss.str()); + } + const Node source_physical_node(initial_entry.second); + const Node target_physical_node(citer->second); + node_mapping[source_physical_node] = target_physical_node; + } + if (initial_logical_to_physical_map.size() != + desired_logical_to_physical_map.size()) { + std::stringstream ss; + ss << "Initial and final logical->physical mappings have different sizes " + << initial_logical_to_physical_map.size() << ", " + << desired_logical_to_physical_map.size() + << ". There are extra logical qubits in the final map missing from the " + "initial map"; + throw std::runtime_error(ss.str()); + } + if (node_mapping.size() != initial_logical_to_physical_map.size()) { + std::stringstream ss; + ss << "Converted " << initial_logical_to_physical_map.size() + << " distinct logical qubits to " << node_mapping.size() + << " distinct physical nodes; conversion error"; + throw std::runtime_error(ss.str()); + } + const auto node_swaps = get_swaps(architecture, node_mapping); + + // Don't add unused nodes to the final circuit. + std::set nodes_seen; + for (const auto& swap : node_swaps) { + nodes_seen.insert(swap.first); + nodes_seen.insert(swap.second); + } + + std::tuple result; + + // We rely on the algorithm to be correct, + // i.e. it really has calculated the full desired mapping. + // + // NOTE: other nodes in the architecture might be involved in the swaps, + // even if they were not mentioned in any of the input logical->physical maps. + // But that's OK; if the caller wants to keep them fixed, + // they should have put them into the input maps. + std::get<1>(result) = initial_logical_to_physical_map; + std::get<2>(result) = desired_logical_to_physical_map; + + for (const Node& node : nodes_seen) { + std::get<0>(result).add_qubit(node); + } + // Now we can add the swaps. + for (const auto& swap : node_swaps) { + std::get<0>(result).add_op(OpType::SWAP, {swap.first, swap.second}); + } + return result; +} + +} // namespace tket diff --git a/tket/src/TokenSwapping/main_entry_functions.hpp b/tket/src/TokenSwapping/main_entry_functions.hpp new file mode 100644 index 0000000000..2694202306 --- /dev/null +++ b/tket/src/TokenSwapping/main_entry_functions.hpp @@ -0,0 +1,52 @@ +#ifndef _TKET_TokenSwapping_main_entry_functions_H_ +#define _TKET_TokenSwapping_main_entry_functions_H_ + +#include +#include +#include +#include + +#include "Architecture/Architectures.hpp" +#include "Circuit/Circuit.hpp" + +namespace tket { + +/** This specifies desired source->target vertex mappings. + * Any nodes not occurring as a key might be moved by the algorithm. + */ +typedef std::map NodeMapping; + +/** Version 1.1, not too bad. + * @param architecture The raw object containing the graph. + * @param node_mapping The desired source->target node mapping. + * @return The required list of node pairs to swap. + */ +std::vector> get_swaps( + const Architecture& architecture, const NodeMapping& node_mapping); + +/** An alternative interface, which just wraps the other "get_swaps" function. 
+ * In the returned tuple, the Circuit implements using SWAP gates, + * and the unit_map_t objects are the initial and final mappings of + * logical qubits to architecture nodes. + * NOTE: the architecture may contain other nodes not mentioned in the + * input logical->physical maps, which may get moved. + * If you don't want this, you must include these nodes in the maps. + * @param architecture The architecture (containing nodes, and edges) + * @param initial_logical_to_physical_map The key is the initial logical qubit, + * the value is the existing physical node in the architecture + * which it currently maps to. + * @param desired_logical_to_physical_map The keys are the same logical qubits + * as in "initial_logical_to_physical_map", but the values are now + * the nodes where we want them to map AFTER the swaps. + * @return A circuit containing the swaps (SWAP gates only), plus the resultant + * logical to physical mappings before and after (necessarily the same as + * the input mappings, because the returned swaps should always result + * in the desired end-to-end mapping exactly). + */ +std::tuple get_swaps( + const Architecture& architecture, + const unit_map_t& initial_logical_to_physical_map, + const unit_map_t& desired_logical_to_physical_map); + +} // namespace tket +#endif diff --git a/tket/tests/Graphs/EdgeSequence.hpp b/tket/tests/Graphs/EdgeSequence.hpp index a88af430f4..311fbc77fb 100644 --- a/tket/tests/Graphs/EdgeSequence.hpp +++ b/tket/tests/Graphs/EdgeSequence.hpp @@ -19,6 +19,8 @@ #include #include +#include "RNG.hpp" + namespace tket { namespace graphs { @@ -26,8 +28,6 @@ class AdjacencyData; namespace tests { -class RNG; - /** * For having a whole sequence of checked edges * to add to a graph in a specific order, diff --git a/tket/tests/Graphs/RandomGraphGeneration.cpp b/tket/tests/Graphs/RandomGraphGeneration.cpp index af9a111c75..7c54030c57 100644 --- a/tket/tests/Graphs/RandomGraphGeneration.cpp +++ b/tket/tests/Graphs/RandomGraphGeneration.cpp @@ -19,9 +19,8 @@ #include "EdgeSequence.hpp" #include "Graphs/AdjacencyData.hpp" -#include "RNG.hpp" +#include "TokenSwapping/RNG.hpp" -using std::size_t; using std::vector; namespace tket { diff --git a/tket/tests/Graphs/RandomPlanarGraphs.cpp b/tket/tests/Graphs/RandomPlanarGraphs.cpp index 27e93709ea..f8512897d2 100644 --- a/tket/tests/Graphs/RandomPlanarGraphs.cpp +++ b/tket/tests/Graphs/RandomPlanarGraphs.cpp @@ -16,9 +16,8 @@ #include -#include "RNG.hpp" +#include "TokenSwapping/RNG.hpp" -using std::size_t; using std::vector; namespace tket { diff --git a/tket/tests/Graphs/RandomPlanarGraphs.hpp b/tket/tests/Graphs/RandomPlanarGraphs.hpp index 1f2e051e14..45d8b3b14b 100644 --- a/tket/tests/Graphs/RandomPlanarGraphs.hpp +++ b/tket/tests/Graphs/RandomPlanarGraphs.hpp @@ -19,12 +19,12 @@ #include #include +#include "RNG.hpp" + namespace tket { namespace graphs { namespace tests { -class RNG; - /** * For testing purposes only, not of much independent interest * (and definitely an inefficient implementation). 
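// A usage sketch for the NodeMapping entry point declared in
// main_entry_functions.hpp above. Illustration only: the architecture is taken
// as a parameter (assumed to contain at least nodes 0, 1, 2 joined in a path),
// and the Node(unsigned) index constructor is an assumption, not something
// defined by this patch.
#include <utility>
#include <vector>
#include "TokenSwapping/main_entry_functions.hpp"  // path as added by this patch

std::vector<std::pair<tket::Node, tket::Node>> exchange_two_tokens(
    const tket::Architecture& architecture) {
  using tket::Node;
  // Request that the tokens on nodes 0 and 2 exchange places. Node 1 is not a
  // key, so (as documented above) the algorithm may move it while working.
  tket::NodeMapping node_mapping;
  node_mapping.insert({Node(0), Node(2)});
  node_mapping.insert({Node(2), Node(0)});
  // Performing the returned swaps in order realises the requested mapping; if
  // nodes 0 and 2 are joined by a path of length 2 through node 1, one valid
  // output is the three swaps (0,1), (1,2), (0,1).
  return tket::get_swaps(architecture, node_mapping);
}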
diff --git a/tket/tests/Graphs/test_GraphColouring.cpp b/tket/tests/Graphs/test_GraphColouring.cpp index 4ae6c523e5..d721c22f76 100644 --- a/tket/tests/Graphs/test_GraphColouring.cpp +++ b/tket/tests/Graphs/test_GraphColouring.cpp @@ -19,18 +19,16 @@ #include "GraphTestingRoutines.hpp" #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphColouring.hpp" -#include "RNG.hpp" #include "RandomGraphGeneration.hpp" #include "RandomPlanarGraphs.hpp" +#include "TokenSwapping/RNG.hpp" using std::map; -using std::size_t; using std::vector; namespace tket { namespace graphs { namespace tests { -namespace test_GraphColouring { SCENARIO("Test many colourings: random trees") { RNG rng; @@ -363,7 +361,6 @@ SCENARIO("Test Mycielski graphs") { test_Mycielski_graph_sequence(graph, 2, 9); } -} // namespace test_GraphColouring } // namespace tests } // namespace graphs } // namespace tket diff --git a/tket/tests/Graphs/test_GraphFindComponents.cpp b/tket/tests/Graphs/test_GraphFindComponents.cpp index f4fbee9ee6..444b25db01 100644 --- a/tket/tests/Graphs/test_GraphFindComponents.cpp +++ b/tket/tests/Graphs/test_GraphFindComponents.cpp @@ -17,17 +17,15 @@ #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphRoutines.hpp" -#include "RNG.hpp" +#include "TokenSwapping/RNG.hpp" using std::map; using std::set; -using std::size_t; using std::vector; namespace tket { namespace graphs { namespace tests { -namespace test_GraphFindComponents { // For testing the connected component function struct ComponentsTestData { @@ -185,7 +183,6 @@ SCENARIO("Correctly calculates graph components") { } } -} // namespace test_GraphFindComponents } // namespace tests } // namespace graphs } // namespace tket diff --git a/tket/tests/Graphs/test_GraphFindMaxClique.cpp b/tket/tests/Graphs/test_GraphFindMaxClique.cpp index 7e3e61a2b0..c87d9a0a3a 100644 --- a/tket/tests/Graphs/test_GraphFindMaxClique.cpp +++ b/tket/tests/Graphs/test_GraphFindMaxClique.cpp @@ -18,16 +18,14 @@ #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphRoutines.hpp" #include "Graphs/LargeCliquesResult.hpp" -#include "RNG.hpp" +#include "TokenSwapping/RNG.hpp" using std::set; -using std::size_t; using std::vector; namespace tket { namespace graphs { namespace tests { -namespace test_GraphFindMaxClique { struct MaxCliqueTestData { vector> raw_adjacency_data; @@ -256,7 +254,6 @@ SCENARIO("Correctly calculates max cliques") { CHECK(cliques_seen == 160); } -} // namespace test_GraphFindMaxClique } // namespace tests } // namespace graphs } // namespace tket diff --git a/tket/tests/Graphs/test_RNG.cpp b/tket/tests/Graphs/test_RNG.cpp index de321343dc..a051e51917 100644 --- a/tket/tests/Graphs/test_RNG.cpp +++ b/tket/tests/Graphs/test_RNG.cpp @@ -18,16 +18,14 @@ #include #include -#include "RNG.hpp" +#include "TokenSwapping/RNG.hpp" -using std::size_t; using std::stringstream; using std::vector; namespace tket { namespace graphs { namespace tests { -namespace test_RNG { // Check that the RNG really is identical across all platforms. 
@@ -146,7 +144,6 @@ SCENARIO("RNG: permutations") { " 69 24 68 71 64 84 36 65 97 98 52 45 ]"); } -} // namespace test_RNG } // namespace tests } // namespace graphs } // namespace tket diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp new file mode 100644 index 0000000000..e7882f53e2 --- /dev/null +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp @@ -0,0 +1,2630 @@ +#include "FixedCompleteSolutions.hpp" + +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +FixedCompleteSolutions::FixedCompleteSolutions() { + solutions["IBM q16"] = { + "1e:2d:3c:4b:5a:69:8:8:9:a:b:c:d:e", + "5634592312451dde_0346", + "7889781d012312de_136789d", + "45345956ab23bccd4534_456a", + "ab1d9acddebcabcdbc1dcdbc_19ae", + "8968cd1d78bcab899aabcdbc9a_179d", + "bc1201ab230e3b129aab897801_123c", + "1d01bccd12bcab239a89ab783445_01ab", + "1d6823349a1259decd2312ab45_23589de", + "9a0e890168bccd89781201230e_2567abe", + "4a891245ab239acd8978de1d12_0189abcde", + "2c56124523593478bcdecd89bcabbc_125679e", + "1d56781223de595645342c3babbccd_124679abcd", + "562368bc45cd34de2c2312ab567889453445_1235678bc", + "ab129a89234a9a78bc01566889abcd9ade2c452334234578", + "de2334abbc9aab5956788901cd1d12bc9aabbc452ccd1d56", + "bc5623349aab2c121d0ede3b23124534452312ab56_12369abcde", + "231dcd9a784a45899a893bab4ade45bc78345645342312cd1dde0e", + "4a6878cdbcab451dde9acdabbc2ccd89ab9a89de780e_013456789bde", + "1245bc1d9aab899a7859232cbcab4a34cdbccddecd120112_135789bcd", + "3468233bbc12459a0134ab0e23899a347845bcabcd1d01bc_2345679bc", + "59231dcd5678348978459abcab0edebccdbc1d0134ab9aab_01234789b", + "7823ab4a9a0e121d4a3bab01899aab891223bccdde34cdbc_1234578bce", + "34452c9a2389567812340e011dde2345340e011223344aabbccd569aabbc", + "bc011dcd2cab3b9aab9a683b560e124523344512bcab01789a899a78abbc", + "bc012c1289ab01599a89595678bc59ab9acdbcde34cdbcab9a8959453423", + "3b34452c12abbc68012378120e9a891d56ab9a238934234512_0123678bde", + "560e1dbc45abcd68019a788978129a4ade23345645345623_013456789cde", + "010ebc2cab9abcab2cbc689a34125678452301683412899a23_01236789acde", + "12780e2359de569a3b34599acd0112abbccdab2301899a89de_012345678abe", + "1d347812cd3b0e230112de34892334450e34cdbc56abbc59cd9a8978_01247de", + "2cab3b0123cdde78123459ab459a5634ab8923bc9aab9a8912cd2c_023679bcde", + "34781d23129adeab68bccd899a56784534bc23120112ab9a23ab897889_46789de", + "894acdbcab9ade8901bccdbc12deab569a7889789a4a342312012334_034578bde", + "1dbc234a9acd3b0eabbcab9acd687889de9aab3b2c340112231201_01234569acde", + "4a9a68122356bcab0189de12239a01ab89780e89cdbcabcd9a894578_02346789be", + "562c12592389bcab34cdbcdecd9a0189123bbc01abcd9ade78893bcd_01234589acde", + "bc9a23ab9a12cd3b237834ab899ade12abcd4556bc3b013412231201_01234579abcd", + "0e9aabcd3b2368ab899aab89bccd788934ab9a128978234aab3b12_0123456789abcde", + "123b9aab1d893423bc019aab1278688959344501342cbc9aabbc9a_0123456789abcde", + "89abbc9acdab3b2334de4556129aabbc2c2334455668ab9a34780123122334899a01ab", + "2312232cbcab4acdde4534687856019a45bcab23bc349acd8912bcabbc2c233445599a8" + "9", + "2359684a2c23cdbcabcd9a890ede1dbccdbcab9a7868899a4a4578342312453401231223" + "34", + "1d3b2c123401231245234a3489232ccdbc68deab789a899aab78bccddecdbc_" + "01235789bcde", + "9a2c233b89ab34decd1d12233b9aab9a5645bc122ccdab9a6878899aabbc_" + "0123456789abcde", + "4a9a344a5678128978239a1268bc3b344ade230145122334452312010e011223_" + "012345679abe", + "4a34ab9a122ccd568923011278cd23bcab3b349a45ab34bccd2312010e0112233445_" + "012346789acd", + 
"890ede4534abbc9a897823563bcd01de89abbc0e129a45ab23344523bc9acd1289bc_" + "01356789abce", + "01893445239a89120e78342345ab89681d12bc0112233423121dcddebc56ab9aabbccdbc" + "ab_0245678abcde", + }; + + solutions["IBM q20"] = { + "15:26:36:89:89:6ab:7ab:8cd:9cd::bf:cgh:dg:eij:ij:g:h::", + "5a67bg056a6b48cdaf8cbcab_047acg", + "16676adi5b7d678c12cg26fgbg_1258ai", + "af6abccdbcdj5bbg267d671601_6bdfgj", + "16de677ddjcd5b8cbc89abbc8ccg_1589ce", + "6b26de6b7dbc6756abdicd67decg_6bcdei", + "78dj67167dbc8c67dicgfgcd56cgcddjcd_8bfgij", + "5a7ddecddeejafghbc2648cg058c5bbh898c5b_02457cdehj", + "678d7d1612dedj38487d6b6a6701di781667cdcgfg_123467adij", + "fg26ei05bh5b56167ddj6a671678cg6a7d0148cdbg_012abdefgj", + "cg67bcaf6b16dj7ddibh5aej677c7d016716266b233949_0127bcefij", + "238d26055b6789di16677d788cbcbgdjcdaf38fgghcg48djghdecd23de", + "6b382326677dgh5605abdifg89168d48dj8c8d67bcde7d5babdj056789", + "cd5b38dj39bc67ej8c6a2367168d7d67de12af89488ddi7d_13479abdej", + "23cg567d67de268ddjbg6a898d7d671601di051248237ddj_0123567abegj", + "266bbg166a89bh67deaf8dab5b48di78057d23bh26di6b6726_12359aefhi", + "8d6abh3916cddj48238c8dbc7d38di7d8ccg5bfg05af6a26_146789cefghj", + "ab67cg7dbc5b5667dj1612gh0178cdcgfg8ccgbg6b67cd38di8d4948de8d38", + "126a56388daf5aei380548djbg8c5a678d49896bbgcg7dde_1345689abdefgj", + "26gh78ej67ab3923018d7d488c5b16cgbcaf5b67cddi1612cd_0245689adefg", + "78676adeghcgcdbcei268967dj8ddj38488cfg23cg8c4889fg_024789befhij", + "fg16de6bbc5b8ccg238d05488d8cbcdecd3926896b2623cgdi_3456789bcefj", + "6bbgfg055bbc267c7dcgde7dab8cbcab6a677d2612di488d23gh8939fgdj8ddj", + "678d78ghdjej6a89fg8dcgfg163805af0167di7d67di568c48_1456789defghij", + "6b5b16cgbc8ccd67de48difgcd3923cgghfg6a8ccddj057d67dj7d6726_15689abcdfij", + "8c67486b23268d5aghbhbcde488ddj56af7ccdcgdi39232623cddicd_345789bdefghij", + "8cbhbcde38ab6bbgbh26237c167d6739567d16di8dde2312012338_012346789bcdeghi", + "6716dj6a387d26de6b67788caf6acd2389difgcg8c893923266a48ghcgcd8d485bbc5b0" + "5", + "6b26896a8c7867166abh48bccddj1638cgbh7d01di8dfgcg1223dedi12_" + "013579abcdeghi", + "cgcddi8cgh12bccd2301decgcdab89fgcgbgdj8ccd4805djab786726_" + "01356789abcefghij", + "6a266abhaf677d48cdbcdiab16675a7dcddecd238c2656bc5b05265b23bc_" + "1234579acdfghi", + "6a6b268d8c67896adj05fgaf8c7dcg6ade671667cdbgdi8dbc7c488ddj_" + "0123678abcdefhij", + "cg26gh78bh8dde126b2339abaffg6a388c481201dj8cbc8ccddiabbccddj_" + "12348abdefghij", + "48deej5b566a8d8ccgbc7ddifg7d67bh2623398978055b16cg488cbh236a8ccgfgaf6a16" + "1223", + "8cbc6726388c895b7d677d160105de6a7dgh67fg7d8ddjcggh4838cgde_" + "0123456789bcdefhi", + "di381623385b6abc05676aabcd5baf89djbccd5adebhbc8dafde488cbc_" + "0135689abcdefghij", + "16126b01bg16677dbhdifg8cbcab6a677dde8939238ccg4849398ddjfgaf6a677dcdbcbh" + "dj5667", + "8d676abc38dj8d488cde167ddjdi67cgfgaf5a566bcd7d89cg8c1689di_" + "0123456789abcdefghij", + "8d78dibc016b23895bbh5acd128ccg89af48fg6701dj382326678dde89cgcd48_" + "02345789abcdefghj", + "163839cgcd7c67dj6a7d6701di482612ghcgcd7d676bde8d7d2339264849de_" + "0123456789abcdefghij", + "267d675b566a39djbc7ddj23bhdefg8ddi673938787dcd12bc67dibg2305cddefg_" + "025679abcdefghij", + "5a56djbgcd05defgbgcg8c2367ghcg8c7d4867gh125623786789dide39238dde8d_" + "01245689abcdefhj", + "bcdi5b7c78cg670516cd7dcg5a566748de388ccgafaffgcg8c8d7d67dj2638_" + "0123456789abcdefghij", + "38268ddj05167dbc6bbggh48de01cddeei6bbccg8cfg5a895bbc8c05382326675bbg_" + "013456789befghij", + "bc165a055b5a6a8c67ghcg38122667fg7838898c8dbcdicddjcd48cgfgaf8c89abbc_" + "012345789abcdefhi", + 
"5a6acdcgbc2326fgeidj49677ddjcd05decdcg5667fgafcddjcdbcbh8c6b3823266bbc_" + "0245678abcdefghj", + "cd676a2678676bbgcg385afgcgghdicg7d7838dj3948238c8d395bbccgfgaf05264856_" + "0123456789abcdefghij", + "67486b26di7ddjbhbc67166a7d677ddecd8d26bc895bcg238ccgfg26bc5bcg8c6a480526" + "238ccg_02345679abdefghij", + "bc7ccg2348677d67bh38djabcdcgfg56de67787d671667bhgh12di488d23388d7d675605" + "01dj_0123456789abcdefghij", + }; + + solutions["Triangular grid 3x3"] = { + "145:256:367:7:589:69a:7ab:b:9cd:ade:bef:f:d:e:f", + "89de498c9e_49ef", + "27129d5956ab_7bcd", + "de26af596b6a9e_25ef", + "156aaf1267377b9a_35af", + "23126aae6a0104488c_36ae", + "26aeab5a7b371559159d_12be", + "9aaf455a8dae15ab_569bcdef", + "8d9aab151248011604ae67de8d6a_024589de", + "9dab7b9a8927239e6aaf49bf1226_237bcdef", + "378c046b56ae7bbf167b6a26ae37_012346789be", + "af1237488d5a0115568c674556899aab7b_234678cf", + "9a158912deab5a59157b4556cd268c4823_12478abcd", + "cd59ab157b459d23af591215595a055a_1345679acdef", + "23055915489e5912238d0545566701164548_0135789ad", + "04379aab48595a45ae5aefde1601ae121623_0134569bdef", + "1267566759159d49af23decd1259012312de01_023579def", + "129a9d59abbf89159a9e8c5912897b055956_24589abcdef", + "7bbf5a9a37497bef15debf48efcdde45569e45_134789abf", + "9aab9d7b0104124837679a8915569d37decd_0345678abde", + "1589459aabaede265aafae12cd675a5667230459_024578bde", + "9d26046a5923bf1612567b01bf7b6756599e49489e9dcd0448", + "6756bf234559019eef12de152712236a2659156a59_03579abe", + "157b8912bfde9e23ef7b04018c4804168c0112_01235678bcdef", + "6a04aeef375667562659566a898d8c01ab9a4948ab040126488c", + "0559de898c6a9abf237baf48040104122315599d5915_35689cdf", + "2327898c7b48ae451516debf458d5a4804016712_012346789cde", + "27deef561523ae1201ab456b56450448049a9d9a_01234589abcd", + "05ef166756deabae6b8d8c896a9a67de3704010412_0134578bcdf", + "5956af2645238c5a489d055aae4515591215459d488c_34569cdef", + "598948158cef599ede8948bf9a7b59ab56ef011601_012456789cdef", + "9aae26048c49486b012316456a26569d67599d6bbf04_012389abcdef", + "05015a45491216678904bf4856599daecd236baedecd8c480401162623", + "455659167b9d23376b9a16480412010448afef7bbf7b_012345689abef", + "5626129a238c455645488ddeefaf04ab486726120104488ccddeaeab23", + "45566a4849efab5a9a01059dabbf16128c9a4823ab9a59151223379d4548", + "5a2701af23160459125a4801041201122312155aabaedecd_01234589bef", + "239a01049d599e4856261227010449af8c899a9eab7b377b_013456789ace", + "5956157b26deabbfefde9d899a89046a262337011201_0123456789abcdef", + "45488c125a451516ab9d9a48ab45567baf233767ef564548_0235678abcdef", + "591215de9ecdefab595aab8956672712014556162359129ede15458948ef23", + "895945ae568c056aab9d2656454867569a67239d6a266a_0123456789abcdef", + "9a9d049a9eefdeab498c480104488ccddeae6a26231215_0123456789abcdef", + "48abde0515129a8c23af899a5a4515596b679d594556126756_123456789bcdf", + "9e04499d599a5667898c59af9d89377b1237019a6a9a04480401_23456789acde", + "6a9a9e8d057b27ab48899a5a057b377baf8c899a8cabaf7b37_0123456789abcdef", + "5a1245af5605016b04599e4989566704239a12159d599d151223_0123456789abcdef", + "1227238c0115045a5648ef12016bde2312049e488c480159566bbfef_01234569abcef", + "454856ab6b598c159d7b9a5aaf0112232689565aafbf7b671501599d_" + "0123456789abcdef", + }; + + solutions["Triangular grid 4x4 with 2 stacked layers"] = { + "156p:267q:378r:489s:9t:6abu:7bcv:8cdw:9dex:ey:bfgz:cghA:dhiB:eijC:jD:" + "gklE:hlmF:imnG:jnoH:oI:lJ:mK:nL:oM:N:quv:rvw:swx:txy:y:vzA:wAB:xBC:yCD:" + "D:AEF:BFG:CGH:DHI:I:FJK:GKL:HLM:IMN:N:K:L:M:N", + "xD78flwxKLci7c27chDIsxfgij7cGMAB7w8e3shi67yDBHiHgh56fkfgHNfEBHtywB7drwwB" + 
"uvvBcB_67fghijuwADGILN", + "vw2334FK125u010506wx7cdi7823gF12AFbcab0p1qxy7cwxgh9y6bEJ4t166bABvwzEuzch" + "vAbAuvvwrwrs3s_2346abcgiuvwyJK", + "56lKfkGLqvFK78afGHcidi5u67FG56165a122305HIioGHAGbcaf1qgmglmL0pabbg6bbgty" + "pqyD06eDqr7889bA_1568dgikltuvIKL", + "BGab16zEfkbc6bbhAFabEJcBhmFKstcdfgrs2706uzchCDghzEglcBpuBCqrvApqgFuzcBxC" + "hGpuqrpq5urs0p3s_12acdhkmtDEFGJK", + "6bFKJKuzBHgFGMKL5a6706ghvBBGHNcBLMlm569edediFGqvBH3423hiwBijGH0pghgF7827" + "hiuvbhHNrwvwwxsxst_479abegjlzHJKMN", + "AB05GHch2riopv7c5aHNdi8d27invBFKBHLM7dDIyDvB9yafdj23didCoNFGuvfkvwxCwCdC" + "BGwBrwBGrsst5awCsx56677wwx_02fhnopruABGIKM", + "AFfgcdxCnopqgh8dqwrsfknMfg8x12syhGgFvAglAFbgbc1qbgqrwBpuklcdsxLMzEqv38BC" + "AB898dgF8xHMINEJzAvA5uzEqvqwhiijhi_189cdfhikloqrswxzABCFGLMN", + "7drwCI5638azGL67sttyAB0p7wwx9eabsxdCbcBHflvB4t6vejjI9ystwBHI5abAsyci06jo" + "qvvABGGLGHCHHM8ezFqvBC7c8xFGaf5aafEF7dEJch8d67_" + "056789adejlqrstwxyzBCHILN", + "BG89012717788d7c67fgdjchFG56zEhG38agCIcdchjIghagEJzAzEqrMNnM7wbAxC05AG6c" + "ioci7c6c2rglbgsxxC6b5auzmnuvHM050127BCbgGMhnAGABnogl0pBCpuCH_" + "0125679abcdfhlnorsBEFGIJN", + "FK06qrCDbh6cafyD5b17vAAFabvw5uGLFKwCcBch23cBzE8e3427rs28tylK38BGCDgmyDBC" + "xCFKditystagbcABcd12iHgm7chmdezA5601CIcB787cchhGbcGLDIABINoN12BCdisxzExC" + "AB_02347cdefghmqtvwyBCDEFHIL", + "GHGMnM49wxghhG9eCDlmqvejpq49xyFLio6cbhhi8eAFqrvAEJ288xghijiHHIfk165a233s" + "rsAFHMagafINkJdigF67561qfE6b8d39tyab9e16ejwByDbcjo3styrwdi0667rsejBHrw7d" + "deLM_023457bcegjklmnopqstwyBDEGHILM", + "6cnoch8eijFGdCqruzuA5umnABglafBCGHHNhGEFbhCIqvxyhiAFjIBCghejvAFKaz7dAFiH" + "34zAjoABBC787waflKhifkoN89di8dvwwx2823uviHdi8d34rxhighfgfkFK277cvw4tdjgh" + "uvch7cuzuvkJfk_246789cdefhijloqruyABGHIN", + "abdehGAB2723xybc7wcicBiowBafhiBHEJzErw9y01uzdi8dzFstazHNhiFG0pghGMiHrszA" + "EFvwEJwBBCxCmn05rwwBvwvAFGxD5a7cqvciqrCHsy01lmCIlK6cuvvw7czAwxrsHMfgxyvw" + "qraz122334nM01dCBGGLmL_12359aefhnrtvwxyACFGHIJMN", + "7w7c6cGH6bgmbgFGafdicikJGMnoEFdeqrdCpvgl34io5bbArsvBijFGBHBCCDDIuzzFEJbh" + "abqr129eHNFLbc288xazGH27stabyD237dBGdefk345bhipqafEF23pu5a05dCFGEFBC7dgh" + "BGuz6756EJBCzE7duz_145679cdefghijkmopqsuwBCDEFGIJ", + "KLvAFG28pvci7cEFqrbcwC5bbhbABGbgagFGfl8diodCEFGHGLBG12vwhnhi23gmnMHIEJCH" + "276vfE39zEuzgFwBcB7cinBCgm3sghlmfgrsdistghchbcHNioBHrsAB8dzAHNklABcBrwwB" + "bccdmn5blmdenooNnomnin_235789abfghiklmopqvwABCEFJKLMN", + "qvvBqrfknoFK3967nMAFeDBH78EFrspvqv8evAwBAFbAbgqw3sDIgh0paglKHNFKHIGHhifE" + "hnGLAGBG9yyDch3423BCbAvwvAABBCcd5bCIbc7caz7wjohmch6ccdxCabjI277cvwDI6vbA" + "tychhm4tch7cwxyDty_234689abdeghkmnopqrsuwxyABCDEFGIKLM", + "AB6vBCazhiAF068eeDCDdCBGGLiostABdi2805ghhiwB0p7dgFhGbAoN5a4twxbcuzdjej7w" + "jopq67cB78rs7dab12rwfguv3svwbg01bc12uvdiEK8956st8dcdfkKLfgBCLMbgdCglrwrs" + "KLin16di122312rwbc16vBpvdevBcB_013568abcdefghijknopstwyzACDEF", + "xDxC6v56gFHIcdxy7cBCHN0p6b78eD67896bhntyab6bvwwxnoFG7838xyBGwB3sdjEFpvFG" + "67bgnMoNhGgmrwBG05345b78vBxC05BCFKFGbgEFGHBGindivB67FLGLfE6bJK4tbginxy9y" + "xyxDglEFuvbg6bwxvw67wxGMFG_0346789abceghimopqrtvxyzABCDEFHKLMN", + "uzwxDIzE5bjIpuhicisx0phGGLAGbh7cabaz1278hnzF23bc27rsFKoNFG7dcdsyyDstab34" + "affk27tyfEvwbh890178MN05io5a5bbhchwCazdeBC67zErw7wuzEJzE7dyD23fl12cdpuuv" + "pqBG230pde89061617oN6bbggF6b_0234569abcdefhikmnoprstuxzBCDEFIJKL", + "ch56qwvBBCwCuv89fkDIst6vABgFcddedChnBCyD67nMhmlm5aazAB01AGtywx6bGH3s05pu" + "iHjI4tCD12CHFGhiGHij16GLfghivBxC23lK5ubgHIpugl5b56bgBH395alKmngmghfgnoEJ" + "IN16stjIHNsx23122rBHvBxCzECDuz6bpuuzzEbh_" + "0235679acfhijklmpqrtuvyABCDFGHIJKMN", + "bgrwvwqvsxuzBGzE01azGLvAxCqvHMCHiHnMhGglbgxD6bDIstabrxafoNJKAFuzFK7cfElK" + 
"qrrxsxfkhi67mnGHHNghsy78pq343sqrhirs23fE126vzEqrsy01uv5aci05qvuz01iooN23" + "eDuvcixCuz12BHqvqrrsqr05qv5avBBHzECHxCstHM9e49_" + "0457abfiklnopqrstuvwyDEGHIKLMN", + "676bGMAGABBHhi6v01uA0pdiABBC5aafbgabzAhGhnnMAGiHGHfEEJEK8dst38djglzEgF8d" + "785ursFG7wCDbcqw89uvGL06HNuzdizEbgEF8x8dvwfg38BGFGhGdiwxCIcBghxywxrw177c" + "6cvw27ciGHchhGjIghcdwCfg06decd7cGHhmhiijhi_" + "12356789adfghijmnoptuvwABCEFGHIKLMN", + "AF67fguvuAvwFKpu2756050ppvxD676cchAFbAABsx2rej8e8xwxwBcBciJKBCGLFLBGbcbA" + "hGghgFhiFKuAAFhmgliHbgmLfkhiCDjoazejBCjIABafghgl9ezAdj8eJK8dcd38kJxycBJK" + "KL5aAB9ytyxC0501oN05uAxy8dMN12BCLMjo233423_" + "025678abcefhijklmopsuvwACDEFGHIJKLN", + "qwABBCwCvwuAuz6bbg2327bc7c67ciab78pq67af6cbcab5acdFGhichfkflijBGGLnMdChi" + "wCwxxDDICH16bAwCBCBGFGgFghbhbAqrrsaf6bgmlmfl5alK8x78afnohi399eeDyDsykJfk" + "23CDafabagCH6b3416mnno9yty01rw2r12010ppqgmlmHNoNioGHkJEF23FGGMGH12gFpu01" + "EFzE0ppu0112", + "CHBHxCvAABhm27bhxyHN2316zFHICI6bpvEJxC3s399yhGhnbcchabafyDgldC12ej5aKLlm" + "jI7c23GLazGHkliHhGvw34gldi8echbcbg16vA6v6bqrty1qdediuvEF01GM12xyFGEFfEfg" + "io27CHxCCH38ci67xytyzA78bczEEJzEglzA2rbAbc7c78_" + "12345679abcdefhijklmnorstvwxyzABCFGHIJKN", + "16INuvjIHI7cqrAGuz5uejFGFK05wxsx5avA27agafijpqGMfkAFqwuvBChiciwChm23kJfk" + "ty56abvw6vpqvw39cdGHcBch8x1qwxvw9emLlK128dbc16hmvAxyEFAFdiin236bFKdi12qv" + "afqrbgwx5a38cdglrschstrsqrdeqvvwwx5601bg12uv162334231201cd_" + "0235679aefghjlpqstvwxzCEFGJKLN", + "vA6c7cqvAF5axCvAHMrw06sxstrsqrCHxCpvpqvBpupqrsqrpqpv6v161223344t786bbggm" + "06010pBGcddCBCBGoN7whifgchbcbg7cdeglEF27233ssxxy9y89vw6bLMGLhGhiiHHIINMN" + "hnDI67FGGHiovw7w676bbhchciiooNHNGHFGAFzAuzjIcdEJJKFKhnyDzEDIazabbckJEJzE" + "cdzAdCxC8xABBCABdezAzEEJ", + "gmlmGHbc67xCbgHNHIBC277c78rsfkqv38AFbA06GL6b3416klgFBGCDDIqr3sdjvAeDglbg" + "azabglsxstGLbhchGH4tEFFGcipuuz5bpqKLbcbAmLEFJKazCD6bwC67hmCDqvbhuvlKyDaf" + "jIrs9yxCgl2rCHhnHMvwxC5b78wx5a0605afrsghch7c7889syvw7cch_" + "012345789abceghijklmprstvxyzABCDEFGIJLMN", + "AFAB6bwCwx8x8dcdcBdjzACDDIFLKLFGAGEFBHHMGM5avAFGfgagAFbgbhhnmnfkdCzEazaf" + "glgllKFKAFvAvw7w17166b7dEJoN066b67uvuzINeDzE2723vwnocisxbhhn6ctyCHxCsx7c" + "7wvw6vCH4t89pvxDvwwxxDci78vwpvpuqvzA89vADIabqvxywx676b16qrst677wwxsxrsqr" + "1qbAzAazstxCINoNioiHCHxCxyyD", + "vAvB16xCCHBCdeejbgdiingl6vbAbggF7d78899eABFKdilKzAxyvBpvpqqrrw7w788ddCrs" + "56BGvBvw67fEwxio56FL7wstchaggm7cbc27bgag23af6b7cchci277cBHBGhGchci5u5aHN" + "GM01055upupqfkqruvafhGaz12rszAuzuvqvqrrs3s2312166buvAFuzEJzEuzEJuvDICDwC" + "vwvAAFFLGLGHqvjI49hGwx9yxywx", + "wBzAbc6bqwqrBGwBchhicd89mncBABbA1601wx12BHwBwC0pbgagafiHde8d9ysx5u01CDxC" + "05fk5a01rsrx28nojojIINMN6v2rchABstafzAvA6v565azE9eejjICIdCuv123ssx23AG12" + "0156ch7c675605011qqwvwuvuzzEEFgFgmHMBCCHHMGMAG9ehnBGINLMDIyDJK4txCsxtyyD" + "lmDIINMNLMmnlmlKKLGLBGcBbcbhcd4tJKno", + "pvghABwBvwuv782rfgBGfkchciwBBChncBbcdCrwKLwCoN16sx676vafGLrsCDgFbgFGqr7c" + "chhmJK0ppqFKstpu385aioIN8e78qwHMGMBCxDzAAGoN34BGBChiyDCDmLDIBC38ABABBCwC" + "7w676bEFfEafabbgejyDghEKGLfE9eijsxfg39wxvwuv3svwghwx39787cch7cej9e78_" + "045678abcdefghijklmnopqrstuvwyABCDEFGHJLMN", + "vwvAAGGHCH6b89hmgmbg1606pv12vA7838237cAFchciiHHICIwC7w6cGL34dCBCcB560pfE" + "lKKLmL5a8e01783ssy9y89wxCDhmno6c05ijchhipuvAzAuzpu8dhGzE5aiHchhidiqvMNHN" + "nMmnFGafEJlKFKFGAGzAzEfElmfkaz5a01057cinijejde8d38231201055aaffkkllm277c" + "6c6vvwwxrxCHrsazxCCD12qrrsstrsGHrs3s23121q", + "BGGMHM56wx67pvuvvwwBxDuzzE783839vAqv8diHdideeDDIFKcBvB6v67566bab277w2rch" + "hninrs5uuzaz16HNAFqrstrs05ijBHnMJKuvFKwxqvqwuvwxuzcd271qvBHNmLfEAFEFzEuz" + "6b6vvBABEJfEfkbgbcbh49glBCmnyDCDafdCBCcBionomnhmqr89di1q1601ab6b7cghbg7c" + 
"788ddihiAGstabij4tstsy9yvA672712jI6vvAAGGLFL", + "7d78qvdCCHmnqrrsuvuAlmnomneDjI898eBG9yhi12HMFGwCFK0616qvqwwCBCAFinCDCHGH" + "HMFGGLvAEF12gmhGFGAF3smLbA0p6b0167af7dstsx56kJfkdiin16675aij5u78nMchhiaf" + "12GM89qrfkqvij1qqr2r8ddCxCJKBCKL17fgFK01jo17ejIN9e7cBG3svAAF49LMvAMN9eLM" + "GLBCCI_02356789bdefhklmnopqrstuvwzACDEFGHIJKLMN", + "5aabqrBC56qvbhbcin67787cciijej7wwx8xCI1216890123nMjIABuzzAsxABpvBCABvBfg" + "7cpvmL7wpuiH5u34inCDEFuzFLhGchEJ7cghchhm5bzEcd17xC8ddiinuz7dbc176bEJfEfg" + "qrsxLMrsqr5ustglchxydj38bgpqglbg6bio27hG567c278x67wxCH78lKHNCH67wBBGrswB" + "wxqrpqqrrsdixyst_012345689abcdefghijlnopqrstuvwxyzCEGIJLM", + "AB1238232767fgKLzA7ddC8edeAGGHBH34EF677cbc3823gmdjuAvAqvpqcibg3s23HIjIjo" + "ioFGwBBHGHEJMNaghnhGLMFLmLgmgllK4tEFAFej06vAklvwvBwx565uuvbc9ylmbgag6c6b" + "vw166vmncd1223399eejdj8d8xwxqwoNjoej3423565afkaf5a560601122334499eejjono" + "mnlmoN_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + "AB5aazab7cafbcvAghbghiFGio8d787ccigm17AFqrgF89uAfk787cpukJvwejdjgl8x0pin" + "diiH9y899e8d05ghGHfgfkAGAF344989ghchvAEFcdAFCI8dGM23HN271701hG38dCBH5aFK" + "chABzAazzA2rBHfgfkqv6v124tbgHNuA3456165623di122301056b018d16126b3sbgdi5a" + "23gm3412affkaf5a_01345789abcdefghijkmnoprtuvwxyzADFHIJLMN", + "27chbhFG5aaf7ccBlmCIklfl78wxxCvwGLhi8934bA8x177dzAzEEF010p565ubhchdedCCD" + "2rhmhidicd12BGazBCxCwxqw16565bxDcBbAwB23BG12GMrwzAINyDvADIfEjI238dsx1qqv" + "vAzE3834xCzAvArwvwvAAGGHCHxCsxFKyDFGzE38wx8dxyiodiFKJKEJ7dioBGqrwxstrswB" + "BGqr1q17GLoN7wKL_012456789abcefhiklmopqsuvwxyzABCDEFGHIJKLM", + "6chicicB3416vBBGCHHIbggFAF9y27hm6b67bh78565a6738sx78FK7c8ehnbhFLeDwBwCxC" + "rxvBuvpupqqwzEBHvw5b6b06ABghbgCHBC2767EJiHcdbc788xwxzAcdglbgwCgF6bdjbAuA" + "ejfECI67cd8e5utywCafpu78bcxycd67iouz5ahiwxzEagijfkghuzvwEJ89hixy78fg67pu" + "56785a67gh786789iofEaf_012345689abcdeghijlnopqrstuvwyABDEFGHIJL", + "rwjIpuhmuzGMqvpqpuchABvAhGmLgl5bbAxC8x28277dcdcBwBiHklBGHIzEbhbgrsde23uz" + "hnbcqrFKCIiocdEKbcbhCDEJrshmzEuvqvch129eyDlm8e28277cchhGBGBCCD6v6buzBCAF" + "hiGHfg06pqvw6vvBvArwtyAFEFghBChipuioEJrsstEFrsFGuzazjoCDBCGHzAklABHNBCmn" + "jIrwvwzAnoCImnCDeD9e_01256789acdghjklmnopqrstuvwxyzABCDGHIJKLMN", + "pq06ijbhhici6cGMvAAFqrrssxABAG788xwxvwqvqr2rhGuvBCincicdvAiopuhnchci6vGL" + "jIoNghgFhG5a8ezAABuzBHxDafiohi8x8ddjwx8xstfEij9yxy6739iors2rwxklBGbcwBmn" + "565aab8d67qvEK23glbg56nowx38zEgl78pu89490p67mnJKBGFK56vAxyAFuz6b168dFK5u" + "AB6bbhvA055bbccBuz898dcdcB89_0246789abcdefghijkmnopqrsvwxyzABCEFGIJKLMN", + "zEaf7d1617786bfEBGBH67bguAwB6v6bMNDIGM2767FGhGGLKLlKglbghnchciBGxC8esxBC" + "xCtystsx0pBG8x5umn783sqwnomn5aBCJK6bgm06676bzAdjABGLwBBCrwqw6cpqpulK5u5a" + "agbgbAzAsxrsqrpq0p066ccddCxCrx8x899yyD34djBG78glcB23cd7c2723344ttyyDeDde" + "cBBGbgGL676b6778bggl_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + "5b6bwCbg7ccd05zAzFCH0p27bc7c67glqwbgci16566bab34mLgFwCbcGLrw1623pqvBBGoN" + "wB9yGMcBwxFKHNjIBHvBCDnosxHIuzxDjo8est78mn3sgm12gFhizFiHuzABBCBGAB5uGLzE" + "ej050p01hivA05ghfgpqqvaf9e5a499evAhieD0501ijzAzE12fEafhigh23CDABEJzEBC12" + "01ABzAzE055a050112vAEJvwwx2rvwvAxy_" + "0124567abcdefghilmnoprstuvwxyzABCDEFHIJKLM", + "vB5u566cqvaziH9y5ahizAghty0605afCDijwBABBCFGeDCDdC8dAF8x1q6cabagchhigFAB" + "FK7cwB899e27zEbcvB237c3s49GHBG78fkvBghejjofgbgJKzAiobcwCoNhmlmglqw277cCI" + "ch7ccddCwC38GMHI78GLbAAF67KLpubA27uAinpupqdi899yBGrwwBrwmn6b8dBG38dihmmL" + "ioAFchFKFGci16EFEJEFAF1216mnno6cmn_" + "012356789acdefghijklmnopqrsvwyzABCDFHIJKLN", + "016bBGcdvAGMbg17560501BCjIejeDCD6bbc12abdiflgl7dijdj232778bhbAAGBGcB7cBC" + "wx8x78nMCDxCxy2r898x16af5acBfkghzAAFINCH12hi3s6bHMioqw23af5awBgh78gm0pLM" + 
"DIyDstrsqwqrrssttyyDDIINMNLMGLBGvwwxvwvA565aazzAbA67dizE78kJfkag1627az7c" + "565aEKaffkch56065a01055aaggmhmch6c8d388ddici7c2734zE122312uz166b34bgpuuz" + "zEEKlKglbg6b1601", + "vwwx8d1606BHvwwBABAFcBABuAuvpv0p055b1quzingmabagdirsrw7cAGLM127c7wwCBCvB" + "vAbACD1q2823uv9eqvqwiooNjoejdedCCIFKAG3427mnnonMAFFLLMGMvA016cci399ysysx" + "inwCvw6vuv5u5aabbc01066727azzEfExCwxfkfljI7dabEJazab565afEaf5atyyD56sxDI" + "67898d78di89dj7dDIyDtyst3s23277ddjfEinEJ_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + "6bbgwBBHglrwqwpq1qBCwCvwBG676bbcbgch27rsrwCIHNHM67wBLM78iHHMnMxytystrsrw" + "6bdihm67wBwx8x3823277cABzAwBABzEzAAFlKstmLflazlmEJafmn56iono34ABoNkl7wlm" + "hmchcB6cGMcdcikl8ede786cdCrwwBABuA5u56677838344tst8dABCHCD89BC78ABzAAB67" + "565aaf5a566778899yyDCDBCABAGFGEFEJkJijuzuAGHiHijHMGH_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + }; + + solutions["Cuboid grid 4x4x3"] = { + "14g:25h:36i:7j:58k:69l:7am:bn:9co:adp:beq:fr:ds:et:fu:v:hkw:ilx:jmy:nz:" + "loA:mpB:nqC:rD:psE:qtF:ruG:vH:tI:uJ:vK:L:xA:yB:zC:D:BE:CF:DG:H:FI:GJ:HK:" + "L:J:K:L", + "9aaqwx899a59hx7bEIAE37wx48rv23xBkAgkzDDH676a9a1289HL26hivLuKab1hhx9a9pKL" + "2iiy2i_3789abhquvwzBI", + "04jn019apqmnlmxybfyCijiy7buv26CGghGKBCtustCDlphiDH56EFrHosptFG89uK59GK8o" + "ij2i9dhi_2469afgjnpvxBE", + "kAAEBFxyae4k9ddexByzDHpFefBFzDabHL9a59AB5lxywxFJmqde2337tJ7ndtnDimwAnr15" + "12hi1h23_29abdlqxyzABDE", + "56ijBChi4567hllp3j481237hl049d01koCDim9pmqhiijkl8cptqu7b0g6muKgwcspq6748" + "26qr2inriy2i_023679bhijoptB", + "opIJ12EItJKL4559AEBCIJGKKL26CGxywxqG9pzDko9aaejzwA6a153jEFaqJKFJAEvLpFqG" + "fvosmC6mgwoE8oKL0gvL_124koyABDEGIJL", + "qrmqlmFGEFGKklhlhiGHlmrHmnAEgkyzbr67560gzDpq59xykAJKDHlmop4kqr8oHLpqopkl" + "uK9dxBDHnrgkBFFJgwtJ_7abhkmnqryzGJK", + "yCCGaeCDmC56mqlpkllmrH2337qGzDrv9d2312os6aaqpq264kquABqGGHabDH5l6a1h56ko" + "9ppFBFghgwhxFJptFGaegkghCG0gzDxBGKab12EFBCCGEI2312_" + "1267bdefghklmprsxyzADFGH", + "FJhl5l67BF6a6m45nrxBjnxy04ae7b48IJbf7bABJK1537rvlB6a9anrquuK9duvnDEFwxtu" + "rHeu0412lpbfFGhxcdwABFHLkA9p671hdehl6aef12aq9d26lB6awx_" + "0123567acfhjlmnqrvwyAEIJ", + "qGghdenr4kKL7b6abr12xBwA01eu150gmqaqabrHAB23FGgwimqu37qrhlbf7buKEF12pqmn" + "yClmghnrcd15lBAB599pBC8c15rvEIpFtJmndtlpptvL483jlB89lpopos_" + "013467efghijnqruwyABGHJK", + "26stpqfv8cBCimABijkoqulmosjz9a122337EIimEF7nIJtJ37CD9puKBCbrmq01tuhl7b15" + "AECDptqGDHstlB9dqrklwxFGpqlmopsIHLos4kopklGHhlFGhx1h155915_" + "012abchijkmnopqrstuvwyACDEHI", + "uvhlIJhiCG9aijGKtJae56efmnqrdt2iptyzlppqghbrqGhixywAxBFJBF7baqxBEIGK01op" + "1559xyoE45GH8o6mFG9pBFjzFG3jaeijqGaqhiijwxptaedegh04hltJABJKKLJK_" + "035adefghjlmqrtvzADGHIJK", + "eu9afvnDCDqrFG23rHrvxyquqG8oBCEFFJkltJ59CD5lEIBFuvFJmn6maedtgk8cmC56484k" + "kA6ahlab2637qr89bfEFlB6mABAE7b9abr595lkl15BCeunDCDrvoEvLBClp37ab7n_" + "23468acefgknqtuvyABDEGHJ", + "hiBF1hGH23quijtuoppFFGqGIJCGdtqr2ilmyC9ppttJFJlB9p595lABmqAEsI48lmrvlBhx" + "hiqr4kqumCmqtuCDim9dhxaqefwA8c2imqeuAEdtbfefCGgw48EFFG9d4kghhiEFoEkAgh4k" + "8o_13589cdehiloqrtuwxyABDEFGHIJ", + "23yCabAEbfmqop12pqpt01st9a599d48aqoEGH9p898otupFkode04FGcdptKL6mfv9p4kEI" + "EFCGABhlyzFG89kA04pFjzsImnefopGHdeyCyzBClpFG48AE89GKKLpFlmFJkA3jhleughgw" + "FGAEuKBFlB_345abcdhmnoqrtuxyACEFHIL", + "CGkA9d9a4k6aEI8o6m8cptEF2612BFhxqrpFBCqu89nrqr9a9ppqqG6a044kiy2i48ptyC04" + "6789AB9ajnpq7n9dstJK56dttJzDBF9dkoyzkAjzgkDHbrko67op5lqrptsI9p56xBlBABwA" + "AB7bpqpt_026789abcdfjmnqrtuxyzABCFGIK", + "stdtlpklkopthi4889GHqGtJijnr2iaqjnbfpttuqusIqGlpyCKLuvst6701wx568o5lxypt" + "nrcdJK12EIhl23IJAEde7bcdmnJKhx12mC6mimkl9a37wAab6aef3jjzaelm15459a3jkl56" + "mC9p376m_024578acdfghjklmoprstuwyHIJL", + 
"ptghKLhlyCmCzDlmmqlpDHyzlmiy48uvbrgwklHLjzwABF56CG6mFJIJDHJKtJ3jquuKrH45" + "gklmzD04lBCDpqopuv264kfvnDpqBCBF8cstcs48EF155l4kgkmqgwBFlBwx4812ijiylm2i" + "xyBCijBF6m12wx_13689bcghjklmnptvwyzCDEFIJKL", + "xypqGKlmBCKLptCD48kl01qrlpnDst6759044kEIlBhlhx89CGFGefnr5lABpt9a59JKyCko" + "CGgkiyoEyCqGjz89GKde15efaqzDkoEI9dpF59fv4k453j23oE56599ppFvL9p4k59ae15fv" + "1h011223017n_0146789acdfghiklmnqrtuvxyzBCDFIKL", + "mqabij01br12stfvpq26KLJKgkop6alplm5l04os0gCD48GKefdeae9apt9pxywxcdqGtJko" + "6alp1himhxCGIJuKuv9agkmqquxBij23lmlppq6mEFmC9d2iopdt8cfvnDwA48pFFJ596m56" + "8c599pAEpFDHpqHLqrbrqrpq_012359acdfjklmnoqstuvyDEGIJL", + "yzjzFGBFim8o678c89mqquqrGKJKtJptpq566a9aefCGwxCDyCaqde7ndt5l48nDeu37klGH" + "gk047byzxyHLlBko7nopqGbfmn89GHxBmCkA9p6mbruKpqko15opyCGK4kCGGKquptimiy7b" + "yzuvko37qumqqu7bopoE_2356789acdfgijlopqrstuvwyzBCFGHJK", + "04oE0112237nIJ7bmnaehi0gimaqgkiyghrvqGhivL3jopospqsI266a67qustosjzrv1h8c" + "gkqr6mkolmhllpeuptABfvJKgkpqFJAExBwxgwJKoslpuKij89hioEEFhllpuvghqu8olB9p" + "8c5l9dnreujnaemqimlBxBnrmqpqBF_012347acefghilmnopqrtuwxyzBEGHIKL", + "9axybrgwghhxhlhiimlpquEFefmqijwAAEhl1hde56BCwACDABIJFGEIBChiCG89fvoEKL48" + "tu5lyCGKquEFzD04FGhl26koGHvLst67xBnD9plp8cDHlmrvzDpt595llpnDefoE9d56hl59" + "nrhilBae8o8c151h15xBBFxB56rvuvBF_05789abcdefghijlmnopqrsuwxyzCDEFGHIJKL", + "FGKLbfae5659pt678c9dEF56qr0145brbfAElmmn7n676a9a59klhxwxwAkAgk12mq04csmC" + "lmsInDoEhlGHaqstos8o8cjnoEuKFJpq1hEFAE23kADH48gkkomqghHLhigkGKdttJhx0ggw" + "qu26dteuim23qu7nDHnD6m01pq7n9p_0123456789cdefghijklmnpqrstuwxzACDEFGHIL", + "opwA8c488o159d599p56ptuv45qG4kqugwfv01lp266asIFGwx48mq56hiuv6mmnghqGxyJK" + "3jcdwx26oEopnDdtuK12kAqukoGK01jnjzosIJmqimwAEItuAE37mCIJabCGwAptGKKL1h26" + "JK6adtuvvLFJpF26FJuvbrIJquae3j7b23ab9a899a_" + "0123468acdfgijmnopqstuvwyzACEGHJL", + "lBFG89qrAEuvko9dae9p9aabbffvrvrHGHCGyCiyimmqhiopoEEFJKhx56GK67044556262i" + "high01047bCG488ocskAwAgwAE3759stsIIJJKGKqGpq7nrHbrrHEF4ktuuKKLHLGHqGpqDH" + "aqpq9pzD6aptGH9p26FGeuEFoE45uKijnDmqmn67nDjnmq677njnijhigh0g0445aqim59qu" + "mqimhi", + "ghgkABkAkl45lmcdij89mq04lB59dequ48koimosmqeffv4khigh0g2i9dhimC6mvL26sI6a" + "67bf3756gw3j67iyyzjz59eu7nGK2iuK37hxcs6aaeeufvaeCDyCCGGKuKtu6aghCDbfqudt" + "7b37mqosiy7boEos268c2i0104immq48040148qu_" + "123456789abcdefghijklmpqstuwyzABDFHIKL", + "wA9pklkAABFGCGBCAEzD8clBwxmq56rv155llpptGKxytJrHDHnD01de8ooEyzbrCDtuefBF" + "strHlp7n6asIdeeuuKcdxBmnBClmABgwrvlp1237GH8chxxB26xy0gBFuvpFiy2iCDptgwcs" + "tuab7bmnbr9adeeu487n0guvdevL9drH9a6aab8c480448_" + "0136789abcdfklmnpstvwxzABCDEFGHIJ", + "lmyz56zDwxABghxyyC67CG45hi5623gwijklgkgh48lppqqrnrjnjzyzyCBCop12ptpq156m" + "3jhx04596arHIJJKKLDHtJstjzqG9d599ddeaeaqqrnrmn6m265lrvcshlrHBF45CDmC6m56" + "454kkllppqqGGHbfmqHLfvxBosDHHLCGuKzDquDHuKopvLKLpqJKmqEFEIIJJKKLHLDHzDyz" + "xyxBiyyCCG", + "564567KL485915bf9duv9plm26hllBlpopoEEFrvijgh4kJKCGwxAEqrEIcsyzyC04GKIJmn" + "mCCDde7nCGpqJKiyhilmaqDHKLimGKefmCsIwAuKAE453jijCDeu6mgwdeae9aEI0g7bAB37" + "uK7bimxB1223121hghhxxBBCij59CDnD45ABmqpqpt_" + "013456789bcdfghijlmnopqrvwxyzABCDEFGHIJL", + "56ghop45abCD9a6a56488c12xBBCyC010448gkJKFJyzhiGHxywxwABFEFoE8ooscs9pFGmq" + "delmmn4krHDHghgkklHLhxsI26121hghgkkooppttuuKGKCGmCaezDkAcs6a4kDHzDlp67yz" + "56uvdtuKeueftJ5989xywxwAAEoE8o899ddttJJKGKCGmC6m677njnjzrH5926brhiuKsIae" + "6a26ae233jijeuhihx", + "oEFGuvEFlmcdyzmn6a3j56klEImquKuvquqrbrimcsGHrHJK5lFGhiuK8c59ijpqgwfv9a15" + "48mq455604kAosim67xypt2iCDkozD89lB569d9a59abos8o5lnDuvghhi233jgkdtijkAbr" + "AEhi1htuuvwx12oErHptlBwA8oBC4kkA4kdtbr48abBFEFEIEFxyyzpF_" + "0234679acdfghijlmnorsuvwyzABDEFIK", + 
"wxCDabtuBC566723stptop01266aCGyCxyxBABAEEFijhi2i45pqqrmqaq9d59yzlm6m56cs" + "hluKuvrvnr7n3723121hghgkkoossIIJqGEI4kaetJwAgwgkbf7b676a236a37BCqu9pbfpt" + "oppqqrGHFGoEop9p89CDBClBklkooEEFFGGHvLqurvvLHLhlnr7n372326fvlppthlhxxyiy" + "2i266aaededtptwxvLfv", + "immnjn01BCKLGH23EFABef67qrhi045612FGxyghxBim6mlpBFpq01bfCGEFGKhiqG59CGae" + "EI4k48CD9p9d0gos6aFJnD4kae7nlB6agkghhlHLkolmlBBFpFpqFJ37oEbr3j1hijiyopqu" + "8c8ojnzD56hx4kDHimrHmq7b67uvzDquqrpqnrjnmqvLrvxy6mim2iij_" + "0123456789efghijkmnoprtwxyzABCDEFHIJKL", + "ABEFoEBFGHxyBClBJKpFyC9p8otuptpqopmqpqlpbfqrjn3j37mCCDnDkoKL45gk7b6mij3j" + "jzosEFEIFGHLkooEEFBFlBFJwABFptlBjn4k2iuKiy04kAyzdtmqBChx7nAE45678c01xywA" + "2iEIaetu4k1256pt48yz23121hhlaqjz04DH48lpjn8cnDDH67GH7bbf7b_" + "0235789cefgjklmnoqrstuwxyzABCDEFGHIJKL", + "45IJCD48kllmko12BC8cmnAB89kllmhxghgkkAAEoEoppFFGCGBCqrklhl1h0104nrGKBFde" + "CDCGGHJKFJFGqGqukoxyyznDwxKLwArvvLHL26kAFJ6adtxB37kolBabptwxlpimmqqrimos" + "pqqrbrophlaq8clpEIbfpFpttJcs7boE8c8ooEEIsIyC673723ijiy15yC59bffvij3j23CG" + "GKBCqGaqde9d59566a15010gdt", + "ijimmq9aAEBCCGEIyzptCDjnjzdeos89dtlppttuquqrnrmnhlJK23tJef3j12482ixy6mmq" + "KLim7nhi01vL5l04cdmn1hlmFGrHwAwxfvab9a0ggw01koosstdtdeeuquqrnr7n37231201" + "0g89klmqlmiycdimjn5lablBmqmCCG9pGKFGBFpF9p8948455lrHHLvL6mnrGHoEFGhx8oBF" + "FGGHrHnrjnijiyxyoshxkooEAE", + "2389CDmn9a9ddebrDHIJjnmqKLimpqqrGKAEpqABopEFxypFlBoE1215mC6mwxfvpq45xBmn" + "quFG5luvHLtustvL48eflm9pnDuK7ngh9ddeef56fvjn04vLHL4k0g45GHtu59233jFG9dlB" + "gwkocs23st5lsIossIlBgkmqjzBCBFcsij59ko9d5956BC6m0gimijmqpqoppq_" + "01235689abcdefgijlmopqrstuvwxzACDEFHIJKL", + "de4kxyklwx3j67BF898cyzIJFG6awApFFJtJ9a9ppqJKGHhighabyzjzijgkkobreuFGrH26" + "378orvkA59156aCG7bkostosoppFFJxyCD0gimoEABtufvBCjn6mnrnDCDmCuv6mtuAByCop" + "26vLptdt59CG9dpFyCEIxygk6756hxAE67ghEIgwwAaehighgwijEFAEfvEF37_" + "12346789abcdefghijklmopqrstwxyzABDFGHIJK", + "xymnlm67BClpopkoABhl56pq676aab5901121523124k7nmnmCBCxBhxhi2i266aaqqrbrpt" + "BFst6mlphlptpqwAmqpFyzCGyCiyimmqkl9pdtlBBCABAECD1hGHFGCGBCgh89pF9p89hiJK" + "GK48GHrHnr3jjnpq37quuKeutJKLuKcs7bstcs12tu8cuvfvuvstosuvrvnrjnijhihlklko" + "ossttJJKEIIJJKKLvLfvbf7b3723121hghgkkA04", + "pt45EFEIIJFGlmtJhiGHqrlposmCBCBFEFoEoppq7n67CGquijlm59gkJK9apt9p9daeFJ15" + "HL3jAEimEIAE9pmqijmC37lmhl9p9aaqaepF04899p8o1hmqkoGHGHDHnDmnmqpqpFEFaboE" + "gk3j8c8ofvqruvoEtuEFBFxyyzxywxxBBFFJgwJKIJIJsIsttuuvvLKLabbffvrvqr7b_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + "15JK56GK6745aqmq6myzaeimGHlmabdt48klxyqG599dpqmCBC6m6aaqCDiy2ijnnrptqubr" + "ABBCmC6mjn7n3712pq9auv23dtnDabjzxBwx127n7bop3jxyIJimBFwxJKEImntJKLkoyzlm" + "jz9ast04ptAEpFcstu8cptuvlphllpptstcs37ef45dekA8c8959cd7b8cef9pbf8o7boEpF" + "8o8c9pcd_01345678abcdefijmnopqrtvwxyzABCDEGIJKL", + "AEABstFG12BCGHhlEIwx1559CDwAgkAE5l45488oopmqpqqutusIim04hxoEklmqhlghgwwA" + "AEEFBFjnFJFGuKrH6mcdBCmnnDlplBBFcs9ddtptmq6m56599aae1hqu6aos01235lhx26hl" + "iy6m7bHLDHCDimiyyCaqaqaeeuhxjzmquvqumqxyhx2612010gghhllmkl23HL4k373j377b" + "_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + "GKJKlp67xBxyyC56CDlmmnimhl9d155l599ppqmq6aDHlBkooplpzDaqwAwxhxghgkos7bCG" + "56266m677nAEptlpklko8o8ccstuqupqoposst4523qriy2iiyyCmCGKuKqumqlmhlghgkko" + "oEEIIJgwHLlmGHwADHqGrvvLHL8opqnrmn0gnDbrbffvoEgw7b3jlm8o5lvLwA377bjz3jjz" + "_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + "osAE1567EFkllmFGcdkowAqr3756ptmnoElp67ABkA6a9aimkoabBCBF9dCDyCiyhighgkkl" + "lBhlpFtJijdt5l59676aaeefbflBkl12018c3jnr0gmCop6mim2i1215csbr6aaegkklpqEI" + "GHCGHLjzEFqurHij56jzmquK9d455659GKqG1hhx1hqrnrmn15FG59379d6ade6mlmeufv7b" + "bfim6m6a7b37_0135679abcdefghijklmnopqrstuwyzACDEFGJKL", + 
"kl677bablppqghgkkoiy569dhi592337678c12mC9pqrquuvkABF01lB6mhlaemC2ipq3j04" + "48yC154k8c89wA6mJKDHcsbrrHAEHLuK9a9ppFEIFGlp7nwx23imEF5lFG15rHfvpt12pqqG" + "aqnrmqrv01quCGrHmqtJimGKwAyC23xy1hhxzD45dtwxyz561hBCCD45sItJcsij7nsIxyyz" + "hxBC1hhxkA3jwxijwA_01234567abcdeghijklopqruvwyACDEFGHIJKL", + "6agkkoGHhiAB45AEEIxyhlghgkwACD6756595llBABAEoE8olphllmmnjnijpt8cyC9dCGGK" + "dtaelB9a6aaq2iuvhi2i233jHL7bbrnrtJDH12sI48vLHLGHqGqumq8915454889ghgkjz9p" + "04uKkAcs7nlm2imnjnijhihlmClm454kkllm6mmC597nEIptnDeuAEbf7bEI679p26677njn" + "ijnD15uKbf_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + "9aBCabbf0104klIJFG89fvBFrHqGkA12EF9dHLptGHlpjnklko8o484556iy4k455lhlgh67" + "GK56CGCD01wx04BCde8czD48tJpq2iosGK45hl1h15qrpqbrFG0gij9dEFuKeuEIoplBhiij" + "1hlposnrjzcs23BC3jxBBFFJBF59CDjzxBhxqrqu1h8cmn1559mCqrmn9dsI011201cs040g" + "dtptBCstABcswAstABBC_012345679abcdefghijklmnoqrstvwABCDEFGIJK", + "23pqklkoop45hlhiwxpqpttuxBijkl5l5615126aab7bqr9aBFpqxyhxhirvkoop9p8948wA" + "lByzEIqrkAkooEhllpwxwAosstptjzlBmnqGbrab9a9ppqmqmn15oE6m9d3jrHjnaeeffv8c" + "890gnDnr59vLrv8o9dqudteuFJqupqqrnrmnimhighgkkoosAEpFBF6m15hxxy3jxBBFFJIJ" + "EIAE0gkAnDkooscs_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + }; + + solutions["Path, N=5"] = { + "1:2:3:4", + "233412", + "1234_24", + "12230134", + "34011201", + "342312_24", + "011234_014", + "012312_023", + "0123342312", + "012334_024", + "122334_014", + "011223_1234", + "23122334_13", + "12233401_013", + "123423123401", + "23120134_134", + "23123423_012", + "233401231223", + "340123122301", + "341201231234", + "341223011201", + "3412012312_24", + "01120123_01234", + "0112233423_013", + "12012312342301", + "12231201_01234", + "2312013423_034", + "0123123423_0123", + "122301341223_01", + "2312011223_1234", + "233412012312_34", + "2312011234231201", + "012312342312_0124", + "013412230134_0134", + "230112342334_0234", + "12011234231201_0123", + "12342301122301_0124", + "1223011234233412_0134", + "120134122334231201_0124", + }; + + solutions["Path, N=10"] = { + "1:2:3:4:5:6:7:8:9", + "4567788934122334_156", + "126756236778673445_156", + "233445566778895601_126", + "675678674534231201_567", + "5667012312780189_035678", + "23344512560112233445_023", + "1234453423017834455645_012357", + "6745897834566778894534_356789", + "014567783456678912782367895645", + "125634452378346756452334_135689", + "1256674523342345122356_01345679", + "122345011267347856674556_0123456", + "124534677845566701234534_01345678", + "78341201452312342345566756_12345679", + "4567013445567867564534231201_01234567", + "23345678671223341278564501348912231256", + "4534566723563445566712342312011234_34567", + "6789345645785667342312568945342301_35679", + "7889126756237834120123675612234534455667", + "01671245347801897845566756452334_01234678", + "34786701234556341223451289677867_02346789", + "5601786778891245233456452334561267_024568", + "452301127867892356014567786756344589_03489", + "34128923455634786723127856014512342334451201", + "89017812453423455634675645563423120123_03579", + "1234566778450123341201566745892356344578348923", + "1256786778012312564534894523785667561245017834", + "233445561223340145566756122334455667786756_012", + "2389122312785667564578344523561223016756_13569", + "5667781256011234453423893467455645342367120178", + "782312450134238956126745566778346745566745788934", + "566745342301128978566745564534452312011223_056789", + "3412675645782389675667783445678956672312342334_1456789", + "783456896745231256677867342334014556671278894523342345", + "2301677845563412234501566756123445562367347889_02345678", 
+ "34566745123423344501561245342334455667786756458934_01345", + "456723566734011289457867566745342312011278233445_03456789", + "457889345645675634234534451223346745786756453423_12345678", + "785667785645893412011278236778348956455634231201_01345789", + "6734453423561234457867013456786723128901786723563445342356", + "78125634231267455645897801673423564534894523125623_1345689", + "2312568934011245562367567834453412562312236778893478_0125679", + "45564567342378456756671245893478450167566723124523342378891201", + "01126734015667452334237812568967786789564534566723786756_0236789", + "56674578564589342301456756456778893467455645673423122378_12346789", + "671234453489452301785634671245563423346756784556893467786756_0135679", + "348978452334451256012367781245344556672389784534124501126723_01235679", + "45342356124556346778455623456734017812562345345645568967786756453423120" + "1", + }; + + solutions["Ring, N=5"] = { + "14:2:3:4", "042312", "120134", + "01120134", "0423_0123", "0104342312", + "012334_023", "123423_124", "3404011223", + "340423_014", "013404_0124", "042312_0124", + "231204_0123", "01042312_024", "041201_01234", + "12230112_013", "34011204_023", "34042334_023", + "34122334_234", "341234231201", "23040104_0234", + "34122312_0123", "04011201122334", "3412012334_234", + "3404233401_0124", "340412012334_024", "041201043423_0134", + "122334040112_0234", + }; + + solutions["Ring, N=10"] = { + "19:2:3:4:5:6:7:8:9", + "67122378_168", + "010934897845_123", + "340945564567_036", + "890178122312_029", + "457867340956_01568", + "89561267237867_159", + "56674512013445_24569", + "12458901564534_123458", + "45096756897867_023579", + "7889016778346756_0145679", + "1201235645126756_02345689", + "09122389347867010901_01245679", + "23897809897867560145_23456789", + "093401231289786778233423_02479", + "566712230134894512098934231234", + "780901456723563445238978_01258", + "8956670989455601123478_01234568", + "78015634458909672378673423564556", + "78670112897867892309568934_06789", + "786756344512233478560112_2345689", + "78895609674512235678346745567845", + "01094567342378128901120109_1356789", + "230109342312564523344523_0123456789", + "455634091223453467122389783467_01456", + "451234785601674556342345091256_123468", + "456701567867897845340989782334_034679", + "67013445120901091256236734_0123456789", + "781223896709125678348909010989_012678", + "01561267784523340109011223344556677889", + "097823896701122301560901120109_0135789", + "56236756013412018945782334562309675689", + "892309017809890934784534120145_0125678", + "092345895601781267788945342345_01345678", + "34098967455634011223340989788909_01234578", + "45122334120956786756457889015645_01234578", + "78341267237834890945786756677889_02345789", + "1201893423457812563409230112233445_0124569", + "234534785623891223013409120109897867564534", + "786756238909017834671223017812092356_345789", + "453456674523785612673456455623341267_01345678", + "897809458978235601450901344556675645_12345789", + "8956346778455609230167093489677889090112233445", + "09017856896709234534124578564501455667788909011223", + "011234235667786712095601122389455667786756_02345679", + "237812010956234589096734231223566734564534231201098978", + "12014556670923344556677889120123093412014523122334_23456789", + "0145788912095667788909011223344556672312097856344556677889090112", + }; + + solutions["Wheel, 5 spokes"] = { + "145:25:35:45:5", "04123523", + "010434_034", "1512233401", + "253415_135", "353423_134", + "4505122312", "053512_0235", + "120523_0235", "151201_1234", + 
"010545_12345", "01250405_012", + "15120134_123", "23122325_135", + "23123423_012", "15120445_1245", + "23452515_1234", "25451501_0125", + "35452505_1245", "452512_012345", + "05341204_01234", "05342301_01234", + "1523123423_134", "3401231204_023", + "34052505_01234", "45053525_01245", + "04351501_012345", "0523451534_1245", + "1235042301_0123", "12453423_012345", + "1535011201_0135", "23341223_012345", + "25053545_012345", "34040515_012345", + "34250504_012345", "3512013423_0134", + "013423120104_012345", "1223340401122334_02345", + }; + + solutions["Cylinder"] = { + "19a:2b:3c:4d:5e:6f:7g:8h:9i:j:bjk:cl:dm:en:fo:gp:hq:ir:js:t:ltu:mv:nw:" + "ox:py:qz:rA:sB:tC:D:vDE:wF:xG:yH:zI:AJ:BK:CL:DM:N:FNO:GP:HQ:IR:JS:KT:LU:" + "MV:NW:X:PX:Q:R:S:T:U:V:W:X", + "yICMuDIJOP56blwxpqsCxyajCDisWX45IS34yInoop8iuvuDtDjt3dISdnSTTUnxBCstcdrs" + "uDdnOXnoVWmnxHLVAB2cvwlmoy12eoBLisrBsCBLKL_68alnoqtuvwxyIMPSX", + "JKHIKLMWCMsCijhrhiisuvghoprB568i0967pqoy45vw34stLMyIwG23fgqAISMW2cyzGHwx" + "opBLHR7h9jfphipzijnomnzJnxtDOPisghPQsCpzdn3dlmjtfgefeoCM_" + "0568hjpqruvAGHJNOW", + "56KLxHcdyzopabVWxy78ENGHLMoyMN3dwxJK6gWXvw01opghABpqBCVWxyqruDUVCDtD12uv" + "KUzJjt23OXENCM34sCpzuEEO45dnhr5fisrssCfppzzJCMkuJTEFENklnxxHHRnx_" + "0358bdpxzADEHKLNVX", + "56WXbcOPrsCD67MN09LMjt789jnokulvlmBCuDuvxystwxdeKLvwklCD8iuvakuEklrsPQqr" + "efisijqAopfgghsChr0ahiktakkuJKAKGQktIJKU0arB01wGmwIScmBLwGDNENLVEFENFP_" + "059cdjlmnrtuyBCNOW", + "RSmnklSTlmoppq78ktkl9jstkthrisJTUV01gq1b674556BLnostLVghis45tDST343dLMwx" + "BCABuDzJmnCMvwuvopdnqAdepzefnxRS67vwgqQRPQxHuDzAOP5fDNMNAKMWktakKU6g0a_" + "0489hiklmnoxBCDJRU", + "34fgij9jBCMNjtLMCDQRghhiyztD562312ijEFABrBgh34fpGQkt1bajwGakpz01IJfgkuij" + "HIak12hr4eKLisxHeomw6grs23rBbl34oyFGcm2c89nx09gqqABLAKqApq01gqKU1bbccmmw" + "_024589afgjyABEJLNR", + "uvOP01ijeouENXEN23IJop099jghpq4534efhi67ijHIaj0aEOfgghhinoGHijhrop12FGhi" + "rBMNvwlvblLMqr56JKrsgq45sC1b4evFablvijdeqAAKKUajIJdnghuvuEpqvwfgHIwxGHFG" + "xyyz_1357aegijnovwEJKNP", + "STrsABcdop23OXRSJKeoOPpqfp34GQ12KLbcab67IJnomnakaj5f45zA23lm56uD45JKFGqr" + "yzpqopku34klEF452ceo5fuEHIcm45AKqA4emwGHfpLMwGuDCDGQkumwMNcmDNeoakuEEOtD" + "uEblku1bjt9j_1247dejpqsBDIJKQTX", + "kt78STpqstqr89rsISstpzCDyIbc6709oy56jt34OPgh5feo7867BCab56CD8978ABuDfp67" + "yzuvOXkl4exyeffgfpqAop5fde4ezAfpyzpzWXwxVW5fktxyghst6gpqhr8irBhroyBLsCxH" + "isBCsCCMGH8iMW_03589chiklpqrzBDPT", + "devwrsqr45hirsabpq565fSTKL6734uvefuDdnMNTUajHR56stBLIJnx78ijxH3dRShitDrB" + "LMhrdn45sCstSTuDpzzJdeQRJT8iyIuvvFKLnx4e67CMMWxHHRISJKFPHIxHnxdnqrqA78AK" + "eocd2cKUajoy89mnmwnodnak3d_4567befhqstwJKNRTU", + "122345pqeffg67QRef567h34MN09yzPQCDVWUVENhr67zAqAGHBCabrBOPktqrBLghklMNuE" + "lm01mnENstktdeKUrswGMWuvbc12gqlvEO7hcdhruEblkl1b7h6g23cmisku3ddnrBbl4eak" + "eomwBLnxVWUVwGLVdnTU3dmwxHBL_1469aegpstuyDEHMRW", + "UVHIyz01xyIJwxOXvwxHzAwxnxajPQEFqAnouvvwQRJKABENfp78HIIJHRTUWXMNVWRSLM67" + "lvKLkuabLMBC09xH89AKLVHRGH78CD56blFGPQEF7hJKnx45op4eDNJTOPOXQRhrrBpqBLPQ" + "lmgqeoLVOPNXpqDNdnmwBL3dNXOXWX_18jnpqvwyzFHIOPRSV", + "ABSThr7hrsIJOX67uvzAENuD56vwopno67yz0178kl89zAdnlmlvOPCMRSfgGQ45abWXyIEO" + "qAHIMWTUCDktBCaksCxyktST1bvFzAFPCM6gUVVWnxisENMNvF09sCblrBmngq67ijlvENyz" + "vF6g78cm34ABuDLMzAkuefblmnTUfgyI9jISqryzUVghhighyILVBLLV_" + "057befhkmpqrstuvyABEIJMNOPQRSX", + "12EFCDFGvFvw2cNXCMghnohiDNcmrsOPop89tDpzzAijCDeoaj9jnoRSQRopWXmwGHmnhr01" + "wGfg7h1bblKLFPOX4ecdOPmwqrVWKU8iGQpqnowGHIBCENGQBL67OXAKvFqApzmwzJghpzuD" + "gqpqvwFPWXefOXopkunode56AKcmDN6gmwOP2cOXVWnxxHmncdrBvwdnCD3dWXOP_" + "01789deghinpstvwzADEFGKMOSUVWX", + "ENLM78BCKLEFCMcmlmsCfgGHcd3dCDqrTUFGghHIpqUVxyTU56qrMNrBvF34GH67lvdednuv" + 
"wxmnPQ45no0auDVWBCxHbl786gSTCMIS343dfgWXyzABijFGHIvw56455689GHwGhi09uvBC" + "HIisSTmnsCmwcmopfp2cNXjtDNwGopoycdfgNXqAWXistDDNNXcmjtuDmw8i6ggqfg_" + "035689cfhilmnoprvxyCDEGHLMNPTV", + "JKnoIJ3412ghRSrBBCsCQRKLwGdeajCDoyklakfg09ajzAJK89dn8iuDmn01uESTsCpzqrxH" + "HIJT4eOPGQ56mwisfppqPQwGcm3dGQzJ1223lmpzPQ67bcGH12IJlvCMuvHIab5fhifp0ais" + "MWsC097hcdis01ijFGvF89lvhrFGajjttDrBBLLVDNpzqrBLpqophieo4eeopqbcab_" + "023459cefgjlnorsxABCFGJKLORSTX", + "45xyOXqrhrJKvwdeIJcdGQzJAKopklUVWX2clmqAOX34MN56rBVWajnoFG67OPEFUVFPkuPQ" + "uEKUHIstAKijqAOXIJblNXeoghgqOPlmuDAKtDjtGHLMWXtDDNpqNX6gcd1bbcvFgqdeKUQR" + "oy8ihiHIJK3dOXmnKLyI7hoynoHRJKBLoptDhr01zJrBBLmnLMPQjtFP9jjtpzoptDdnDNnx" + "MWhrdn_1245abdeghlmoprsvxzAGHKNOPQRUW", + "xyNXzAEFABdnkulv34uEFGfgajuDdeFP45wxBCyImwuvENCDakzJefrBBCophrwG5fCMcmsC" + "GQisfppqyzxy89mw7h5fEF8iGHFG2cuDPQrsnxMWMNENxHyzzAOP34PQAKuEABisHRJT8i09" + "RSLMcmBCzJ3dqrdnSTCD89pq3dqrCMmnTUUVopnoyzABMWoyeo8i4eeoissCiszAAB8ioyyz" + "_02359adgiklmnostuvwxyzABCEFGHJMNPQTW", + "BL4evwmnuvABvweowGqr78mwwGgqPQOP89bl23rskuuvcdst01QRGQGHPQqrTUMNSTISakfg" + "BCcmvFyIOXAB2cOP7hHIlmzJmnIJFGNXopbclvHIOXGHHRefajcdJK78nofgoyabrsijdeEF" + "67gqRSqrCDLVDNOPuECDKLeoopFGPQajpq4eBLLVuDsCCDWXijis8i56pz5ffp5fijAKsCCM" + "qAAKsCrB_13489bcdfknpqrstuwzABCGHMPQRUV", + "xypzopoyefGHxyPQktijxHxyWXQRCD89zJtDhifgpqgqjtGHdeEFefisBLcdBCABzAoyDNFG" + "ghdeCD6gabqARSGHeocmbcoy7hENMNFPklHIyzqrtDcdBCvFsCKLktrsgqENmw2c4e09hrHR" + "ajJK7hKLJTabST78LMuDzJKL6756GHLMRS89lm8iismnFGpzEFuEnxxHHIkusCCMISHIaksC" + "0aiskuuE_0679aefghikoptxzBCDEFGJLNQRSTX", + "2cuE34ajmwlmuDuvmnQRWXnxoylmMWwGqrLMklpq45yIVWfp89UVxHBLOX34blHRRSlvmnoy" + "nx56KLijeoWXkttDklrB3dhrTUSTefabLVBLUV7hjtlmmncmcdVW09qr01vFkllmnxFPHILV" + "rsIJijdelvrBuDpqhiblzJ1b6gCDqAuDisHIgqABBLLMcmktkumwuvMNuDNXvwktDNvFuDqA" + "ABBLOXABLV_2358bcefijlmnoprtuvxyBEHMOQTUX", + "DNOXVW56ijtDrBuvzAjtrsvwIJPQABHIxyJKFGAKEFBC4501GHpzuDFGyzismwqrabcmEFJK" + "09fpABWX0167OP56ghuv9jvw78mwsCzAwG67BCABUVKUwxuDPQstMNpqDNijmnbcsC12cmrB" + "2cuDAK12mnis8icmsC5fisQR8ikuakMWRSBCqAnouvCM23OXsCWXVW2c0aCMoyISyIoyISMW" + "bcabakktstsCstCM78LVktBLrBhrrBBL_13459achjlmnrsvwxzABEFIJMNOQWX", + "GHrsHIabwGUVktuEMN56IJ45ABzAsCisVWku89nxFGKLqr0134no09xyLMJKpqyzABvwzAuv" + "3dOPEOEF78ijLVvwOXCMsCENdnkl8ilmPQTUFGop67GHSTTUgqnxstHIBCajpqQRRS6gblab" + "wGGHxHvwHRqrGHuDKLktxHuvMNtDaknoJKlv0aENsCisUVKL78zJ8iEFjt9jxynxisyzijWX" + "aksCdnnxOXCMWXOPFPoyhiPQMWCMijeo_15678ainprstuwxzBCEGHIKNOPTUVW", + "67FGUV45wGOXBC01VWCMIJuDPQmwABBCJTyIzAMWMNzJrsEF565f34ISopab894512noENMN" + "ij233dpzeffppq2crBABJT9jhihr7huv5fqrcmST01UVvFFGjttDijbllvRSqA67vwCMdnAK" + "34wxFPqASTmwBCGH09EOopKUJKwGhiuEcdbckuxy23cd12EF23IJyI9jjtijENMNCMEOaktD" + "MNJKxynx01DNtDjtIJkt9jdnkuJTnxIJNX_0245678acfhjpqsyABCDEFGIMQTUWX", + "23EF67RSqrAB78ijkt09hi0134LMghkuopblCD6glvlmBCPQCD1brs09wG23IJQRtD4eblTU" + "GQFGFPnosCfpQRKLAK89CMqAKU4512op5634OPzAyImnnoopgqOXuv67AB78zJktwGoypqmw" + "6gpzvwKL893dpqwxjthrLMmnEFvwdnENDNrBlv45tDMN01gqRSijTUnxvFLM6gklxH09nx9j" + "uvqAFPST67mnjt9jAKqARSQRuDuEEFtDFGGHHRGHSTFG_" + "12456789bfgijpqrtuvwABDEGJMQST", + "CDghCMHImncmvwhruvvFkunxBCVW67lvrsTUWXakvwkl12PQrBhiyzQRlvJKhrxyyzMWDN01" + "FGfgNXktef78OPeoABCDGHIJxy89DNBCqrPQ672cNXjtgqijUVVWtDFGWX0acdajHI12HRrB" + "zA5623mn8iklhiLMUVlmBLwx5fIJTUqAABdnghjtqAhixH34nopqLMop6712MNGQgqnxdnEN" + "qABLmwAKqAMNlmxH56LMMW6756KUGHHIIJzJIJ_" + "01789abefgklmnoqrtuwxzACDFIKMNOPTVWX", + "09vwde3d34TUefOPVWEOfgUV01xywxzJlvvwmw89dndeeo2378MWKL6712klnxsCPQFPOPkt" + "dn01stIJijENhrisajabmnlm09EFefCMoppqqAFGnoQRLVgqbcpz56op6gzJOXIS8iAKWXuD" + 
"pzqAsCajCDjtxHJTajghmnfgMNOPtDKLENCD12IJRSdeLVKLLMPQwGBCQRCDlmJKOPhiijOX" + "jtKLghuDABkuBLuEktkuLMOPISIJzJpzzJfp_" + "02345689bdefhiklmnoqtuvwxyzACDEFGLOPQRSTVW", + "QREF0901MNghRSTUxyzASTEN1bABfgvwklCMrsbcwx89lvqr56uE67hrpqopMN7h45LMPQKL" + "vwKUNXrseohrpzgh4egqMWOPOXoyeowx349jsCBLyIqA6gajgqef5fcmzAhiLM78KLENoyku" + "rBNXJKfpFGpzakISDNWXpqyIxyzJabTUajBLMW6gtD5fij01ab09yzwGrBhroyeo4eUV01QR" + "hiTUEFFG7hblxHmwPQFPhieoop1bcmmw2cwGEFoyuE_" + "1456789bcefghkopqswyzABCEFHLMNQRSTWX", + "fgENEO56xylmMNsChiyzFPrsuvefwxuExyCMajabVWwGuvUVoyhrSTmnsCcmRSlvijghLVrB" + "MWqACMcdfgKUyIhrMWmwrsgqISdeopWXLMPQOPsCOXefMNfgPQisHRQRGQwGwxAKDN2cIJno" + "JK674e5fuDqrbc78ab89xyWXmwcdkuLMoyCDbcgqKLOX8iajdnVWbl2cak6gdexHisrsnxBL" + "xHrB0aeokuajcdlvbloysCCDBLDNSTbcabajcdCDab_" + "245acdefghijklorstvwxyzACEFHNOQRTUVW", + "GHijmnyzisKLFGzAyICDajrsHIuDvwABrBlvyzzAABijBCsCcdAKpzbl12HRvFTUghCDDNEF" + "NXCMku1bdnlmfghiLVlvJKwxSTuDgh895fuvCDgqbluDlvmnnomnakOXhizAGH8ilmfgMW45" + "56deVWWX34DN0a45VWxywxFGJTyzghhrABrB67zAGQhrefBLAKAB09LVkuoyfg0a09eoabgh" + "HIuvuEblvwlv8956abzAoyyzzAOXxyOPOX788i67issCis_" + "23458abcdfhjmnrsuvwyzABCDEFHIKLMNUVX", + "JKGHfgIJxyCMtDyIISHIHRKLbccdFGEFTUghUVwxvwaj09abKUJKJT1bVWpzGHIJoyMNEOxy" + "eobcHI4evFLV12jt23HRCDENOPBCakWXoytDqrOXlvEOxHmwuEBL01KLmnJKHREOpqfpDNbl" + "ktrBhrVWeoLMOP12CDwG78cmklnoyImwtDJTIJrB1bBLwGBCefblktrBCM5fAKWX7hhrak0a" + "qrMWqAgqyIsCCMWXxy6gIJwxAKxyrsstlmJKrsJTSTRSST_" + "189abcefgjpqvwxyzBCDEGHIJKLMOPRSTUVW", + "rsuDuvpqRSQRhrklcmbcbldngh12ktklENjtLVis56OPEOEFvFrsMN01GQoyDNuEcdHIstkt" + "xyVWUVwxdePQ23STMNWX2coyeoxy67FGIJGHopvwBLJKklkuuvabbcAKcdHI09fgKUabOXEN" + "akTUij4epz89OPAKeowx09de0aFGGHvFFGGHsCophilvkl1b5fijakMWzJefHReoOXdeEFcd" + "oydeUVeoRSklyzoyblqAgqbcxyqAAKajMNQRLM1b_" + "02359abcdhjklmnopqrstuvwxzDFHIJKLMNOPQRSVW", + "efku12IJTUzJSThifgLVRSCMrBHIGHBLeoghlmijOXstJTOP5fakKL0asChiLMuv1byIajCM" + "ISoy4eeoJKvwENUVIJbl12FGWXuvEF4eijfpisVWKLdngh1bLM3doywGabHIvFLVOXHRyIWX" + "CDKLbllvuD34xH8iCMblrsoyPQjtNXop45OPqruEnoGHAKQRrsPQHRfgfpiskusCmnopuEwx" + "vFnxoytD34dnjtCMzAyIxHpznxzAISHRwxyIoyijdn3dhidn_" + "02458bcefhjkmnotuwzBFGHIJKMNORSTUVWX", + "EFfgENMNuEFG23kuakvF780aab1bij6734KLPQghhi56GHajwx8iOXuvktqrAKWXvwlvzAwG" + "pzopGQBLpqKLvwxHcdyzTUwxnoMWbcstxyxHblsC4578cm23isKUAKCMIJgqqAwxMW1blvbl" + "dnijmwabgh1bhi89gqOXVWsCKU12ijisop8i6g01isghyz9j09jtGHHIuv01yI9jtDHIDNno" + "uDjtajoysCWXOXGHpqOPFPFGeoOPOXpzuv4ezJwGmwpzwGcm2cmn_" + "0124567abdghjklpruxzACEFGHJKLNOPQTVW", + "sCkldeaj34uvTUefwxktuDwGxH23mnpzopMNlmnoGQzJmw9jisJKdnlvbl12kljtDNbccdIJ" + "ghpq4589ktklfgrBLMvFdest01MNnxEFUVdnVWwG3dwxzASTBLzJrsqrvFWXnxABBCUVTUoy" + "VWrBNXtDRSktUVwGzAhrlvghrsyInoABSTrBQRABhrak34TUst7h23ab1bPQISyIFP121bIS" + "MWSTbltDlvopfppzVWQRfgUVoyghrsvFFPfpvFsCCMMW5flvsCbl_" + "24589abdefgklmnopruvwxzCDEGIJMNQTUWX", + "xyPQ56WXyzklOXwxxHktqrlmdersFPRSMNMWWXxyQRvF78HRsCzA12AByzwxhilvtD01OP5f" + "89zAPQ67noHIFPvFGQuvSTkuOXkteoklLMvwKUJKqAjtmngqAKJTPQfplmOPJKhrIJwG78qr" + "klqAAKDNpz9jBLMNcdrBBLqrrsHIDNopdntDjtmwMWuEnxuvcmEOuELMno9jqrmwwGGHuDtD" + "IJ6guv8ivwxH09wxghqAQR01opRSQRKLJKvwKLfgkuzJpzefzJfp_" + "2567cdijlmoqrstuvwxzABDHJKLMNOPQRSWX", + "ABMWzA01hi5fENBCstPQcdopCM09ktMN78LMKLAB45WXvw12FGEFdefpsCdnisstBCcdLV67" + "fgBLIJ56ef34wxgqzJdeqAAK1b23QRklCDCMKLwGFGsCrB01blktGHLVVWlvnoIJpz89JKhr" + "bcnxRSvFFGEFuvUV124euE2c34ISpqlvTUWXFGGQ78QReost7h9jRSOX89PQoy09IJOPEOjt" + "VWPQuDWXqrVWJKIJrspqOXLVnoBLLVeoyIrBhrEOuEtDkuakkuBCuEEO_" + "02345679cghikmnpqstuvzABCEFHIKLMNOPQRSUVWX", + "34uvJKvFFGHIopfpefrsKL56abyzuDIJGHBLJThruvrBstlvpqqA7hHIgh89OXNXDNuDuvvF" + 
"FP01oycd12IJdejttDFGrsyICDuDfg67LMghuvbcAKcmmwKUWXzAjt45AB0aISyIst2301PQ" + "VWwGhiMWktku3d4estghCMeonoktst12CDcmyz09deiscd2cLMCMsC5601KLktisef8iLMlm" + "akmnQRmwcdkuwG1bnofgdeOPGQmndn89deuEEOFPRSISyIoynxeo4eeooyyIISwGRS_" + "124679adefhijloprsuvwyzBDFGHJKLNOPTX", + "VWTUvFHIabSTnoHRakBC6723wxhiyzpz34UVLVrBuDdnvwJKFPzJ457hrsQRvFTUFPEOajqr" + "0156BLAKlvCDbcpquvRSopwxoySTvwmweouDoyMNxyjtTU781buEcmwx0ahrrBajMWAByz4e" + "QRgh23BCJKxyKUyzmwEFdeCD6gABtDblzJijhilvijVWqrfp12rsAKnxqrjtdn3dyz23mnbl" + "degqqA1blmkuktAKabkuUVuE1b9j12VWUVajkljt23JKKUJKtDDNNXkukllmijhighhiij_" + "01245678abdghjmnoprstuxyzBDEFGHIJKNOPQSTUW", + "uvQRTUmnUVrBgqPQJKghSTIJKLvwpqklRS6grsopqrwxjtuDpqoy1201lmKULVbcJKxydn45" + "mnxHuvCD6778zJMWGHnoBLLMCMdnFGuD09SThi01op9jCDijyzHRajkuuElmJKxylvWXisuD" + "vwJTzJ12cd4eJTuvrB2cnxVWdnkt09CMLVVWMWCMBC3dnxuDuvklqAxHQRdnABnxgqvwqACD" + "56BLHIIJLMABgqdnwx45RSMNqAoy6geogqlm7h4eIS45fgLMoyAB01yIeonowGnxISGQHIPQ" + "xHOPOXghno_2356789bdgjklmopqrtuvwxzBHJKLPQTUVWX", + "OPLMrsUVghoyuDuvstktEF34JKPQIJCD67FGqr5645rsakRSjtVWtDvFKLwxxyxHHIyzefzA" + "zJJKABfgpzST78efMNCMCDuDuE67wGEFEOOPde09LV5fbl6gHRRSISIJzJzAqAgqfgefeono" + "nxFGlv01blhiBLcmmwwGGHHIijku5fuvLVTUbc2c12hiOXUVHRabcmgqmwjt3ddnuEEFFGwG" + "vwEOENDNtDjtajakkuMW893d09VWNXJTIJyI0ayznxku89akDN8iwxxHkuvFUVisuEuvABBC" + "CDuDuvvwwxxyyzxHsCisop0afpqAfppqgqgh7h6756vF8iop", + "KLQRuEyzab89qrijhrhiwxmwbcBCxyABpqgh23sC7h098iajPQakklcm34STmn56TUWX7845" + "12gq89ghRSlmnozA01wGJKfgyzFGisIJktopmwGHrBklvFVWOX233dHIBLGHuDmn12CMlmwG" + "2crBijblIJ6gmwGQlvsCwGblmwabhi8imnJT0anoCM56CDeoopLVBLhrFPpz7hvFzJGQuD01" + "WXfpLVpzOP4eakFP0azJuvkuqAOXfpisJTdn09STnx5fRSuEqrxHrsrBVWnxhrdnLVBLrBzA" + "BLfpLVTUKUWXTU_012345789abcghijkoprtvwxzBCDEFGKLMNOPQRSUX", + "blktOPlvLMlmOXvFCDBCEFGHENmnlvABJKxycmMNHIKLFGrBPQhrEFakISdednGHBLyIEOFG" + "wxqAEFENeorBde4e34ijISabvFvwwG45opajblefbc7hHR78RSKUaboyEF09KL6756ghNXmw" + "DNGHNXnxhiijfgEOQRFGtDbccd45LM01CDvw2cRSpzuvghfpFPJKBC12LV2czA6712STgq01" + "78pzBLhiABCMIJ8iRSisuDCD8iCMHIqABCWXcmCMOXOPPQHRGQvwOXtDHIcdwGjtIJtDzJgq" + "WXGQMWuDCMQRBC_134789abcdehjklmnoprvyABCDEFGHIJLMNOPQSUWX", + "qr5fSTuDHIvwEN89lmIJCDOXTUnozARSNXeors01lvvF23CMuDsCCMopQRpqktPQuvqr78qA" + "KLDNFPKUrBBL4eLVyzdeGHHIzA121bxyHRklAKrBblnx8iKLlvJKbcabijyzOPhighkthrrs" + "VW2ccdajFGcm7h6gIJHIdevFuDUVCDopKLBCmwfgefGQPQFPwGlvVWqryIJKGQmwBLblxHCD" + "fpWXrs1buDijabghmnGHrBBLxHisajoyijKLhieooynxdnKUnxxH3dAKqAyIISgqqAAKSTRS" + "HRGHwGmwRScmmwwG_034578cefhjmnqrstuvwzACEFGHIJKMNOPQRSTUVWX", + "CDstFGfgKLqAABBCsCrs89mnIJabHIefwxxyak23bcSTopuDCM12degq0aWX2c7h676guvVW" + "MWLMHRlmmncmcdUVnxEFeoQRpzzAqAMNxHvwoy3ddeopfpeftDuDkufgNXcduEEOhrpzzJst" + "ij1bklbl1bktakaj9jtDgqlvklku7hvFJKfpOPOXbcabIJjt5fbccddnDNvwpzwxvwDNuDuv" + "vwwxnxnooyyzzJJKAKABBLLMrBKUAKqA45NXKUktBL3dfp5fCDissCABfpispzJTzJzALVAB" + "rBhrBCBLBCJTCDDNstrBKLJKIJRSISIJJKKLBLrBrssttDDNNXOXOPPQhr7h", + "RS67abPQefST09OXCDpqHIOPkldeIJqrQRmnPQcddnJKxynxnooppqqAzAEFENMNMWWXOXOP" + "2cbc1b78aj3duDVWlmktrsstjtijhiLM45WXcdOXuv56qrFGwGvwEFfgDNuDuEsCmn01CMCD" + "rsku67tDNXoppqqAzAyzefxHlvAKgqjtHRDNfpJT899jijnoGQis09cmzJmwMWpz5fktqr01" + "GHFGvFvwwxoppqqAzAEFkuyIuvEOeoeffpvwuEku4eAKEOoyNXblLM6gklgqpqnx6gxHBLkt" + "pqqrrBBLKLJKzJKUeoyIstjtijnxISGQ8isCoyis8iyI7889bl78sCoyeo4e", + "67sCuEWXAKwxuvopCMFGno12mnOXxyFPKLyIijISlmmwvwcmzACD012cyzvFUVtD34STqA3d" + "ajEFab45aknxwxmwcmcdvw4eMNAKKUrs34IJjt12mwEN8izA563dpzktMNMWWX5fFGHIFPzA" + "LMMWis9jtDsCdnqruvjtaj45cmABkluD78ij2c09zABLlmopQRCMsClv67zJIJfp8ieopzcm" + "is8i9jfgnxoy7h09mwJKsC2cWXhrCD12ghyIrsijuDPQAKrBOXBLwG7hGQhigqhrISwGOPuE" + 
"qAEOAKuEoyijWXmweo_0123456789bcdiklmnopqsuwxyACEGHIKLMNOPRSUX", + "bcpzstgh56OXklrsoyjtpqeoqruvyIRSkt348iFGoymnlmIJstQRuDop23kl45OPpqkthino" + "PQWXhr344edezApzrB09JKOXKL89MN12dnisDNOPAK78sChibl09MWklopVWCMUVnxRSBLGH" + "dnmnxHfgWXlmkuakVWzJ56LVblabWXaj7hijbcisnx3d67uDcdhr8iBLxHTUhihr45rBBCcm" + "ghCDijuDSTab1bajOXijOPhikubl7hHRhrxHFPlv7hvFOPmwKLqrghrsfgLMlvefMNcmblLM" + "JTqrKLJKAKqAgq6ggqAK_0234689acdefghijkmoprstuxyzBCDEFILMNOPQSUW", + "uvvwrsuD3423EFwx09uvENfgefbcMNcmkufpstQRRSktrBpzLM459jzJTUPQOXyzpqvwrskl" + "UVeomwAKhrOPst7h01jtlvzA12vFISuEgqLVhiWXDNSTABqrdnOX568909rBtDktxyyzxyrs" + "DNwx3445klxHHRnxyIakMWisefnoTUghajWXmnLMJTlmxHMNcmENoyRSyIzJ9jpzMN01kuzJ" + "HRLMjtfpmwJTfg6g562cgq6gfpAKdnabST0a9j893dqAOXOPAKKUcmAKtDgqwGDNghNXwxqA" + "DNtDuvvwwGuvwxxHnxdnnx34_012348abdefgijnprstuvwxyzBCDFGJKLMNOQRSTUV", + "uvlvklENIJOXpqVWabIS09KLhiTUOP12wGGQPQFPvFUV3dJKwxxyoystrsuDMNopyIijoy7h" + "wxnxmneoqrisnoVWLVLMHRdnWX8icd01uvsCqAbcVWBLrspqabCMghisuEHIIJKLlmSTmwxy" + "ENefdeefzJAKoppzzJhifgyzzAyzEO09RSgqrBABqAyIoyakefktnxdndeeooy0aSTkuajak" + "ij6guEsCxHabkuOX4enxTUefdncdcm2clmhiHRbcUVabGHxHxyyzfpVWpzuEyzfpEOxyuELM" + "wx5fMWLMhrfpwGvwlvvw_2379abcfghijklmnpqrstuvwxyzACDEFGIJKLNOPQRSTUVWX", + "uvuEvw45dezAefqAmnuDlmklhicmQRgh34uv78KLmw2cLMdnIJRS09gq890aajwxwGPQuDCD" + "OP6gijHRvFCMhi7hbcabpquDxyUVdekucdGQlvwxajSTJK78bcMWCDde67KLNX1bmnhroyyI" + "xyxH9jlmabmnjtnxuDISklGHvwMNeoijBLajoywxyIISoprBAKzJblxHlvklkuBLHIDNMNij" + "vFLVFPdeJKcdpqis78IJvFHIdnpztDBL23jtrBbcop12hrtDnxsCnouDrBTUKUKLzA23BLrB" + "oppzTU9jfp5ffp1b7hbllvblhrrB_015678abdeghijklmnoquvwxyzCDGIJKLMNQRSTUVW", + "yzGHBCkt09LMMNENhi45UV7889lm56cd67ijtDuvde9jST23EFcmjt34klNXWXMWHI4eHRDN" + "lvsCmw6gAB45cmOXNXENkt23opvFVWbleo5f12fgCMKL78lvghvFhizJMWWXisRSFGBCGQst" + "1bwG34abtDuDCDgqrs23bckuTUQRmwjtPQJT9j3445uErBGQUVghdeRSakhrIJrBjtBCCM12" + "cdLM67LVBLuDkuEFsCrBGHuDDNENhrpzfpfg6g677hhrrBBLKLJKbcde8iis8iHIxHzAcmmw" + "sCIJzJnxIJzAISgqqAgqabGH_" + "0123456789adefgijklmoptuvyzABCDEFGHIJKLMNOPQRSTUVWX", + "01jthiopijxynomnzAghtDMNajhr34LMhiPQklakabijfgCMlvAKqAbcvwNXxHUVrB7856gh" + "45isefuDDNpzHR5fcdJThioyKU09uDNXzJJKAKyIwxMNbc89uvKLku3dVWghsCvwktFG56st" + "klblUV09GHcdENdeajFPOP0aisIJab1bTUUVajfglvsCjtHIktnofpMNoyeo8iklyImwoyST" + "cmJTstCM4eeonodnrs4eisxymwnxLVblsCVWdnFGxHEFBCnxlv1bBLJKFGzJstFP3dpzvFFP" + "12LVlvzJENDNtD1bbl1bDNENBC_" + "012346789abdefghijklmnopqrtuvwxyzACFHIKLMNOPQRSTVWX", + "45HIktfgGH348923bcFGcd56EFJK097hzA4534121bbchigqgh7h6756454eeffpab23MNhr" + "opfgLMcmUVwGFGvFyzIJHIOP899jijyIdnnoQRhrrBABzApzopeoeffgBCFPKLJKAK7h5fRS" + "zJmwqAcdcmmnCDtDstDNGQQRHRpzOXuvvwvFFGTUwxCMuDLMlvvwmwiskunxzJNXMNMWsC01" + "VW8ihr7huv12WXDNmwKLbcoyEO3duECMGQSTwGdnmwJTissCisCMGQlmOXWXMWMNENzJHILM" + "ajabakkllmcmrBLV3dyIyzoyop8iblvFktkunxlvblBLIS010aabrBfpajpzLVfp5fKUHIxH" + "EOvFuEEO", + "uvuDcdFGbcdncdcmJK01delmIJmnmwwxHIzACDvwOXefnouvcdMNuDGHLMABEFvFWXBLyz89" + "kllvuvENmw5645nxCMCDDNzAxyyzzJIJHIopmwwxnxcmvwpzqAlm78HRAKnouErs675ffg6g" + "ijxHgqOPkuJKmnakghhi8i7867nxtDfpsCeoIJpzjttDVWzJisqAEOuEku8iENTUlvpzbloy" + "QRFPfp09vFwGqrlvlmcmcddnnxxHHIISRSQRGQwGfgSTAKEFyIoy9jsCeo4eMWqACMLVeovF" + "1bISFPoysCBLyIoyhrissCeoAKrBhrrBBLLVVWMWCMsCisjtIS4eef3dtDNXdeefDNtD3d8i" + "fg23TUNXjt", + "12qrajKLstkloyoppqqAzAxyfpabBCbcwxUVVWhiST1bLMpzijyIIJgq23xyABrBqrTUENde" + "JK56uv67gqvwoy0112dnQR2ceo45OPuD56UVGHOXblCDBL3dak5fpquvuEmnGQrs676ggh78" + "HIqrdnIJEO4e23MWnxmnmwvwxHpquDfpCMuvzJlvLMnxblfgdeRS09dnsCMNnxpzrshrhiQR" + "zJSTwx1b12wGblefvFRSvwpqxyCMMNMWWX01wxxy67VWvwgq4evFFP6gpqgqqAKLPQFPTUlv" + "AKvFJTlvPQKUqAgq6gzJpzblzJ67ab09_" + 
"0123456789abcdefghijkmopqrsuwxyzABCDEFGIJKLMPRSTUVW", + "nopqzAhiop78dednnxxyoyABuvTUBLst89kt01abajvw67klefghbc34ijOPuDDNCDCMuvgq" + "MWuDisvwuvkuakabbllmwGxH12PQQR5fvFRS1byIoy238iblpquE3dstispzIJfpdeLVyIwx" + "uDuvvwwxxyyzzAABBCcdefhi6gST0aISyzyIfpnozAABBCsCstjtajabbccmmnnooyCMGH0a" + "yzFGdeBLijMWtDwxLVlvFPbllvvFuDGQjtWXtDrBuDuvuDCDisvFvwwxxyyzpzfpfgghhrrs" + "sCCDuDuEUVVWWXUVNXfpDNisstqApzAKHIIJJTTUKUAKqAqrrssttDDNNXOXOPFPFGzJ7hpz" + "hr9jfp5fjt9jrB", + "5ffg6gIJ23GHENrsabqryzrBxyzJ45PQ12pzyzoyJTbccddnfpOPvwpzisBLQRLVijABrs34" + "lvuEsCCMaj23qAnowx45gqxHHIyIAKNXISEN34qAMW01ghhiJKNXyIlmEFnxFGakENijgqfp" + "CDgh7h675fmwBCfgMNENuEuDCDBCBLIJfpajwxoyEF09hixHGHHIKLak12AKHRiskusCqAgq" + "LMCMklsCqr3dnxlm9jyIdnnxisMWcmjtzAoyeouDENGQ9jtDyzopabktzApqqrMNxHENijlm" + "wG9jPQkllmxyEFajmwijxHvFabcm2cwGxyrBcm_" + "12356789abcdfghijklmnopqrsuvxyzACDEFGHIJKMNOPQRTVWX", + "WXEN01mnTU4567EFvwlmnohifgklBCbl56UVqrcdIJOXCDFGrsRSsCyzwxdnijSTbcCMgh4e" + "ab89hiHIyIxyOPnxzAktakajijisKUyzoyeoVW3dUVqApqpzzJJKdn09KU3drssttDuDuvvF" + "FPPQQRRSISIJJKAKABde2cGHsCMWCMqrBL5fyIVWrsjtmw9jsCCDuDEOzJuElvbcbllmcdmw" + "abCD0ajtvFDNNXWXVWLVBLBC1btDfpblpzdeoylv34fpEO5feohr7hfgoyFGwGvwDNGQwxyz" + "zJjtHR9j23EF89677h12qrhr7h788909011223344eeffguDCD67BCqAzAxHxyyzzAABBCCD" + "uDuEEFFGakqAkuakwx", + "01ktLMCDOPijcdde4e3423121bEFEOfgghpqxHxyyI4567eokuGHbllvFG4e78OP89uEEFvF" + "ST5fvwiskuISfguDuvuDCDsCBCrBwxyzabCMnxis098ixHnxTU3445KLFPyI7hghlmklABjt" + "isSTMNefhrLMbc9jdnabderBzAJKoyIJktjtajkllmgqmwhrbc7hmnyIcdBLISMNrsNXwGmw" + "mnno2cyIyzzAqAwGUVqrENAKcdKUAKrsGQLVoppqdnGHHIstnxIJHIxHzJHRnxdnGHdeeoop" + "TUABpzde3dBLdersLVNXeffpUVpqTUqrqAzJrsgq6g_" + "012345689abcdefghijkmnpqrtuvwxyBCDEGHIJLMNOPQSTX", + "45CDBCST34rsQRRSrB09ABPQWX01EF3d5ffpISpz1289STvwQRefbl9juvghOPklwxvwvFFG" + "GH78238iHRfpisGQPQFPnx890akuuEuDCDBCBLLMMNdnAKqAcm8iTUDNCDCM3dsC09yzlmhi" + "EO89lvAK7hvFgqrBzAhrKLRSrBpquvBLwGABLV67VWSTLMblvwKUKLLVzAcdyzde56xy45nx" + "5f78mn01mwENisvwQREFxHakyzENhiuvVWHRzAkufgak67tD12ghCDktNXBCABBLfgakktef" + "hrrBzAhryzBLyIHIst7hhi8i5609898iis8i890901_" + "012345678bdefgiklmnprsuvwxyzABCDEFGHIKLMNOPQRTWX", + "klbcHIjtTUkuzAEFFPOPcddeRSQRSTvwxytDlmyzVWpzfpopeofgyImnISGHWX5fvFUVNXVW" + "lvoyyIjtqAGQghbloy89klajwxENlmRS4509uEKUvF01vwrsiskugqUVijSTISIJ0auDwxeo" + "zJJTajklis8i56CD3dwGpq6gblyI237hTU89qroyuDyIAKnx12norB342cop45ijcddnmn12" + "mwUVBCpqno01gqbc4e78cm09BL0112ghmwzJwGGQhi23CM3dPQkupzhrzJghmnmwfp5ffggh" + "rBdeGQefwGmwabABijajAKmnGQabnohrABfpopno_" + "013456789abcdefghjklmnoprstuvxyzABDEFHIJLNOPQRSTUWX", + "WXTUISKL34wxcdJKABlmfgBC67opvw3dispqmnajLM09KLzJJTrsyIuvMWLMLVsCisRSjtCD" + "uD78GHBLrB45LVBLDNIJtDktMN9j01JKFGkllmGHNXcmvwHIAKabakklfpIJlvEFENuEISoy" + "JKyIBCvF0912kuIS2cKUFPcdxHqAqrvFrBBCwxQRakeoHRhrEF0auE7hnoFGopakxH6gvw34" + "GQFGnxUVijSTAK3dgqwxHRnxCDnocmpq67hitDoplvijBCABTUblKUlmklAKvwlmmnAB9jno" + "mwkt78uvBCmn89vw78uE67CDdn6gghmwvF3dEOuEcm34_" + "01345679abcdefgijklmnoqrsuxzABCDGHJKLMNPQRSTUVWX", + "EFxy56uErsvFdeLM67STstwxUVefklKLkttDQRDNlm09lvMWLMLVOPHRRSISzAijajTUabAB" + "nx89bllvxH6ggqGQIJ1bJKUVqAqrrBBLKL0a4eNXpzMNakhiKUnozJcmdnfpvwijHIFPktHR" + "EFFGisdepqJTsCnxstEFxHuDENGHpzrsuvyzdnblklgh78JKAKzAIJ9jCMBLkunxyIlmoybl" + "uD3dabHIDNwxrBstjtaklveobc2cTUbcblkttDlv9jDNABNXjtPQvw6grBOXhrWXVWGQ8iOP" + "rBstUVdnVWoy45343ddnPQ3d34wx45KUAKBLqAgqqAKUnx_" + "014678abdefijklmnrstuwxyzABCDEFGHIKLMNOPQRSTUVWX", + "12klefyzOXpqzAmnWXIJnorsMNopEFlmvFABfpqr09gqkt45deBCENRSQRyIxyxH56yzpz6g" + "JKstzJ2334BL01ISklKLLMmnJKKUTU12isrsIJeoFGisij9jakEFyIoynonxxH5ftDkt4eIS" + 
"fptDDNopjtpztDsChiisstjtajabbccmmnnooppqgqHR5f0anxzAyzxHxyEOakyzlmzAqAqr" + "AK2cqAdnJKBLqrgqghrspzmnrBBLKLJKzJpzpqlm7hOX09lvhruvzArBLVCMLMBLrBrsyzvw" + "7hwxuEuvvFxyFGgqopFPLVGHyzMNvFbl78673ddnuD3dmnlmmnnooppzzAqAgq6g67788909" + "011blvFPOPEOENDNuDuvMNMW", + "yzgqghhrpqbcyIKLcdopnoOPisxy89PQQRRSEFdeST2378abwxbcgq09EOrBrsJKstmncdmw" + "NXLVwG5fGQBLISakrB7hghbccmlmmw4edelvhiMN562ccd3dvFzAHI0aab1bLMpqopyzIJuE" + "nxJTKLzJoy89bckuefdnblajnxLMTUabuDwGJTeoGHDNHIfp5fkuIJFPjt7hJK1bajEFMNMW" + "lvblqrQRefHIak78FG1bmnlvdetD56rsDNkuGQnoQRpzefqrnomnmwwGGHHIIJzJpzcdcm09" + "0auEdeAKmwEF7huDjtef899jjtFGpqfp5fvFeftDhrKU4eAK7h_" + "012345689abcdeghiklmnopqrtvwxyzDEFGIKLMNOPQRSTUX", + "8iyzzAOXSTLMBCPQAB09xyKL67vwJKopoyyzzAqAMNOPENNXOXuEEFvFTUqrrBQRPQGQwGis" + "stktkuuDCDJTmnmwwxvwOPgh45zJpzHIxHwxBLnxfpdnCM2cvFRSbccmyI3dhiaj0amnij56" + "ef4e45oydeeonocd3d23fgghbcisAKKUrBhrrBBCklblabABzApzAKqAfpop126gnxBCsC9j" + "tDktkuDNNXHIxHnxnooy34jtIJ23WXtDDNgqhi34NX2cOXENqAmnBCABJKzJzAMNBCmwCMsC" + "KU564eeooyPQQRHRHIyIoyeo4e45566gghhiissCCMMNNXOXOPOXVW8iWXOXUVDNtDEOOPFP" + "vFlvlmcm2c1201099jjttDDNISmw", + "ENAK23KUJKJTqrqAABMNklFGef89lmfgEFLMblnoBC2c121b7hyzCDopoyBLyIuDmnuvLVrB" + "BCwx09xyuE0aabwGwxKLFPPQGQzJyzmwktcmkl892ccd3dDNtDzAuDxynxvF78bldnlvAB34" + "vFbc3dCMvwrBeoyzCD7h6gtDghFPhr7hrBakajjt9juDijpz45zAuvfp5fabvwoyxydnblBC" + "STajOXCMstMNvFlvWXVWLMAKENmwijGHvFOXmnnoFPsCCMopdnnowGmwvwmnnouvNXOPsCis" + "pzuEuvENISvwwxPQMWxyyIvwQRabPQOPuv8iuEajEOabuvzAISvw_" + "01234578bcdefgijklmnopqruvwxyzABDEFGIJKLNOPQTUVW", + "uvJK12bcCDuDENTUajIJcddn2cMNABijDNSTLMRSLVlvUVaknxyIblku8iQRWXnoTUMNop56" + "lvkl67EFSTNXgh1buEuDblrsOPFGoyyzPQhrEOfguvuEEFajMWxH1b9jHRBCHI12VWzAcm09" + "ABKLQRjt23lvLMBLKLRSrBDNGHiscdQRMNKUFPvFab89uDmwoyqABCwG01ABeoAKbchrGQTU" + "ab7hCDoyuDyzxyKUBCqAST78tDDNJKFPNXghtDrBKLCDjtuEuDCDlmLMyzKLJKsCUVklVWis" + "hi7h78lm5fIJhrpzajrBpqqr9jpqghcmmwpzajzJwGfppzzJpzfp_" + "0123456789bcdfgijnoqrtuvwxzBCDGHIJKLMNOPQRSTUVWX", + "WXvwUVBC09ijabcdajEFHIbcABlmmnoppqqrrsLMMNklVWFGstij89deghhief2c4eNXfgGH" + "xyyzuvtDzAlmABqAqrrssCAKisJKMWblIJJTSTRS6gBLmwcmDNgqghhrrBABnoktCDsCstvw" + "vFFGnxwxmwDNNXOXcddeKU7hcmuDfpefdednnopzGQ9j5fjtef2cktxy3ddeMNef1bpqgqfg" + "pzyzuDuEENMNCM9jklJKzJzAghhr5frssCCMBCBLKLKUUVVWTUJT8iGHwGwxxyyzzJJTSTRS" + "lvWXisblvFlvUVxH1bvFvwrBFPBLvFuvqALVrBVWgqku8i0aqAakFP0almzATUklkuuEUVyz" + "KUxyOXEOuEuvvwwxxyyzzAAKKUUVVWlm", + "FGlmHIJKef12zAxyfgIJENKLwxyzRSEFGHghklgqstcdFG56VWUV09hi34abHI6gLMqAJTde" + "xyBCuDCDCMMNENEFvFgqIJsCvwAKisbcGHwx78WXzAABuv23PQmwcdrByzwGzAkuxyOPcmgh" + "HR8iAKnxjtzJdnopBLuEOXGQVWeoajGHFPFGGQpzMNrBzJGQEOJKJTTUKLLMvwwx45EF2cMN" + "qAvwHInx0aBCmwwGJKBLuDUVmw344eNXyIoyeofpfgefblGQISbccdMWSTdeGHuvblef5fdn" + "3duDopdnlvvFmnlmlvfpfgghhrrBBCCMMNENEFvFlvlmmnnoFGDNsCCM7hsChrrBBLrssttD" + "DNENEFFGGHHRRSSTJTJKKUUVLVBLMWAKqA", + "QRzA01ghpzopklRSUV89pqhieo12qrWXABoyTUSTrsOXijajoppqqrrBBCCDDNENEFFPPQQR" + "RSISyIVWWXhr7856dnuvnxNXdnmnIJ67EN458i787hLMBLBCnoissCtDDNMNCM9j898i2334" + "3djtcdtDop0akuBCuEENak56459jkujtmntDMWuEvwlmlvvFxHwxwGdeLVBLfpHR5fDNtDjt" + "ajakkllvvwwGGHHIIJJKKLLMFPnoblnxIJnoktopMWfpstpqmwwGGQnocmxHrsstktkllmmw" + "wxnxnooppqxHlvabvFHRhrHIhifgefdeghhiijajabbccddeefabhi6gghhiisijajabbllv" + "vwwGGHHIIJJKAKABBCsCcm1bKUFPBLrBBL", + "wxBC45EFnoklSTktoppqghCDhiblefuvmnOPstPQFGkuQRdeENLM09qA3dJKabghfg6g56KL" + "OX01bcMNOPabefEFgqLMIJJKwGFGvFzJVWUVRSnoKLoyoppzmwuvENajMN67WXwGDNyIABAK" + "JKzJqrqAABabPQ23OXFPNXCMLMBLfpoyJTrssCTUEF89cmpqABxyvFqr7hLV3445CMmwijtD" + "yzqApqpzcmuEbcwxcmeolvAKbllvvFFGwGmwcm232ccmGHSTzJwGuDmwcmbc2c12JTab9j09" + 
"0ayItDjt9jijisrshr7h787hHRQRGQsC8iyIxyxHissCISyItDyIISSTTUKUAKABrBhr7h67" + "56454eeouEdnBLnxrBzJdnBLCMMWHRLVxH", + "qryzsttDcdxyTUIJhrrs34vwMNwx45HIEFLMuEFGENABvFEFuESTkulmblRSjtstktklKLlv" + "wGFGvFJKmnmwwGGHxHyInoWXNXMNLMKLKUUVCDuDcmopCDmwpqfp2cmnCMgqOXqAAKpqjtuv" + "wGmwis8i899jajabbllmmnnooppqqrqAijwGrBeode78zJ12ef6gcdsCdehrMWoyfgTU89LM" + "gqKLLVUVSTpzCMefmnTU5ffppzzJktstISsCkt3dSTeomndn3d23121bblHRnxMWxH5fHRRS" + "WXMWoyCMsCqAgq6g677hhiissCCMMWWXOXOPPQQRRSSTJTJKyIaj0a09898iabKUEO4euEnx" + "ak0adeakkuuEoyAKbccddeKUABeooyEObcOP", + "rs45sttDvwWXAB56lvFGwxcdvFBCaj23GHwGmwmnnooyxy12abHI67IJyzdeefbcMNEFFGkt" + "wGJTzAcdENuDdnEFdeeooppqqrhr7h788909011bbc34FGUVyI45bc4ezJsCISnxdnRSnxgh" + "gqqAABrBIJJKKLLMNXhiCMsC67stBLrBOPqrfgnoVWktkllmmnnooppqqrrsISjtoyIJJKtD" + "jtKLKUUVDNxH9jBLyI5fijAKisajFPTUfpgqqA5fsClvakMWQRISRSHRIJSTtDGH09NXCMCD" + "sCisijjtuDuvEOuEkuak56ajpzhiJTTUKUAKqAgqghhiijajabbllvvFFGGHHIMWzJ452c23" + "34455667788909011bcmnx3d2cdnnxpzEO3dcm", + "CDlmfgBC1223mnENuDghno01cdMN09TUhiuv34vFOPyIIJJKKLLMMWWXOXEOENDNuDkukllv" + "vwwxuEoy89eoopstdn45pqktuvnxHIRS67qrIJxHSTKUvwrs2ccmgq2cQRCDHRqAAKJKuD12" + "jtkuBL6gKLxHnxxHHIyIoyoppqgqghhrrBBCsCstjtajakkuuvvwmwwGPQRSabCMij7hQRGQ" + "hisCRSdnwxJTSTIS5fyzxyHInolmijajabbllmmnnooyyzzJJKKUUVVWMWCMsCeoij4eeoop" + "fp5ffgqAefwx0avwOPNXKUlv3dakMNcdkuAKEOuEkuak0aKUabLVbccddeeffgghhiijajbl" + "KLqAlvuv9jtDjttDIJJKKLLMMNDNuDuvvwwGGHEO", + "zJpzfgfppqab78qryzajQRzAbc89ghcdcm349jop6gLMmw4eLV56hiRSrBpqnoUVwGopTU45" + "FPEFPQpqqrab67gqajqAIJJKmnnoeodecdENopgqMNlmeo23mnbllvAKijBL6gCMstgqBCLV" + "1brsabyzvFDNjtoyKUpqNXyIwxeoqrOXnonxsCBCrBvwuDuv3dmwtDopHRyzdnstMNDN2cis" + "bloynxghLMNXblbccmmwvwfgFPISakzJKLIJdnMN3d2cISblEFefxHkuHRJKuE4eakyIvFef" + "wxfgISDNBLyIuDFPvwwxCDsC8iajhiissCCDoyuDuvlvblab8ibccd3dcdbc_" + "0123456789abcdefgijklmnpqrstvwyADEFGIJKLMNPQRSTUVWX", + "IJrsvwABefzAuvMNLVstHIpzuDbcOXqrdezJGHCDpqqAzAFGGQIJ78cdvFtDST4e1bbc2cQR" + "op898igqlv9jcmFPhrmwvFkude89BLghrB01hrrsisBLEOnxeoqrjtpqsCrs7h233dabfphr" + "JK0901IJ56ktkuTUakajuExyqrVWbcvw6gLVwxoyvwuvqAuDvwijUVcm12mnrBnxENlmBL01" + "wxvwHIOPuvrBPQdnLMKLQRabbcuEkuAKeodegqcdENqAbcklabRSMNAKktlmVWUVMWstKUAK" + "gh0aQRtDPQNXDNCDBCNXhiak01kuOPghuEgqEOuEABpqBCefCDtD5fkuak6gefst_" + "01235789bcdefghilmoprstuvwxyzABCDEFGHIJMNOPRSVWX", + "uvqr7889klIJHIvwLMOX0112ABQRBClmGHmnktFGEFEOOPPQklRSSTwGmwyI9jHIBLKLJKIJ" + "yIyzzAcmijissttDku34hiuEWXrBMWBLvF56vwOXzJzAAKWXHRrBajqArsJTQRAKakvFwxFP" + "67TU45lvOPisqrfpbcpzvFlv78568ifgef4ePQxyOPyzghmwnofgeoeffpGQijOXhiwxpz5f" + "fpoppzyzhr0akunocdbcbllmmwwxnxIS67wGxHjtyIDNtDlmakkt09kllmnooy78DNISSTno" + "strsmnlm1bakrBLVBLLVrBhihrrsstjt8iissCMWCMMWsC6gis787hhigqdn89qAAKbllmmw" + "wGGQQRRSSTTUKUAKqAgq6g677889090aHR2ccm1b2c", + "wxajijhighJKjtLMgqtDstsCqAcdBCKLbcmnno9jcmakmwijkukllvLVdewGCMuE34hi2chr" + "blcm45kt56mn2cAKxyoynoOPWXrsyzghoywxvwkuuvjtgqTUFGzAyzefxyqrrsakHINXGHis" + "uDijnxdnVWnxzASThipqFGrB67tDvFTUGQ3dRSyzwxzADNdeOX7hhrrB4esCqris8iisrsqr" + "wGqA45jtuvzA56UVNXKLyzcdkuGQMNstuv6gPQJKxyTUxHbcblHIIJsClvJKKLblbcLVFPCM" + "STnxgqIScdeodnnxBCoyOXeo4eABQReoyIoyHIGHFGGHHIISeoRSSTqA4eQRABBC_" + "1235679abcdfghijlmnopqstuvwxyzACDEFGHIJKLMOPQRSUVWX", + "rsvwOPfgabABWXyzwx45blxyuDOXUVzAVWKLBCPQ56EOoyWXLMLVVWvFstghyzQReovwABno" + "lvkuzJtDwGrBqA78JKrs4eABuvvwkukllvrBwxbcgqhryI67kt01mnBL6g1bSTGHcdbcblLV" + "rBdeHIuE89hiKL8iyzHRxyIJzAhrpqnxkuakcdefMNxHmwku0akluDlm7889AKuEcm01mwEF" + "qrKL9jFPENktCMMNLMmnHRcmEOEFeoMNmnrsnxCD5fENef09DNRS011bxyoy89eotDDNyIjt" + "oypqHRFGeoopdeEFGHtDFGHI3d23vFktyIklGHHRISRSlvblvFdeENDNtDjt9jpqjttDoyDN" + 
"EN_045689abcdefghikorstvwxyzABCDGHJKLMOPQTUWX", + "CDBCKLEFyznoDN67LVVWWXNXMNmn34xy2cyzdnBLABAKqAblEOhiUV7hmwcmTUijrBrsdemw" + "cdbcjtdeHIIJVW23ABtD9jqAlmefjtJTENFGPQmnlmbllv12op1btDxyqr78st34blabUVEF" + "QR0a45DNVWGHpq56lvCMHI4ehrUVyIzAFGvFlvkloyeo09FPghcdajdeuEhiwG01yIfgefPQ" + "deOPPQvFcdbcabGQakktlm0aSTGHstHRsCajBC23TUABqAnomnijKU12AKlmoplv34qAzACM" + "uDhrxHqrpqtD4534dnjtKU9jpz3djttDuDuvpqzJdnpzhr5ffp5fpzvFzJnoisFGnx8ihi_" + "01245689abcefghjklmnoqstwxzABCDEGIJKLMNOPQRSTVWX", + "IJfgvwwGwxxHxyPQGQrsOPghvwwGFGEFuEOXyzEOopCDuEENDNstpqKLqrlvJKrshrrBBLcm" + "bcabajijissCCDtDktklcd1b122c564567787hhrrsstktklblabaj9jRSHIwxIJNXvFvwwx" + "xyyIHIGH01mwwxxHGHmwcmmndnoyxynxsCbcxHdnHRqAqr09AKlvrsstkt0aklakgq1bKUbc" + "bllmTUcddeef8ieooy3dopfpcdqASTnxzJisgqLV5fpzklEF8iDNtDnofpbcabpzzJmn5fMW" + "yIFG56jtDNMNxHlmmnnxxHGHFGEFENDNtDkt9j45CMVWijzAKUTUSTISyIyzzAhihrUVqAlv" + "gq34VWrBijajabbccd3d3445566ggqqAAKKLBLrBhrdnMWCM", + "67IJvwpqxynowxcdBCUVbcOXuDFPCMdeeocdghdnpztDabstajishrTU5ffpfggq2334efbc" + "yzabRS7hABpzuvakghvwQRBCku78DNxyzJjtwxCDABzA7hyz89xyVWyInxPQmngqcmISbc78" + "5689mwuEabxHWXuvnx0aopNXvFtDcmHRwGxypqSTbcrB1bBLABopyIoywxAKBCDNxH7h12IS" + "01HRuD4556aj0aSTakrBku01hrtDakQR7hCDfgMWmwlvghjttDCDjtbl0alvwG23fglmCM6g" + "12gq1bqAMWAKakyIUVqAGQgqbl0109TU896g6778vF89FPJTzJ09pzfp01wGVW1bpzzJJTTU" + "_01234579abcdeghijkmnoprstuvwxyzABCDFIJKNOPQRUVWX", + "45HIdeABnoENKLbcmnzAajjtVWcdvFabqrLMgqCDtDstrsrB23IJdnpqkt6ggqqAyzklzAmw" + "lmlvghgq12eooyeozJJTTUUVLVBLBCCMMNDNuDuEEOOPPQGQGHHIyIwGuvCDtDstbcisSTvw" + "uE093dmwTUcmFP7hvFkuOXpqUV78rBop01hrRSnxefde23efxHblSTmnWXakktnxlvvFblst" + "lm1b4eFPbllmmnnoeo4e34232cISVWxyEN5fwxEOOPPQGQwGwxxyoyoppqqAAKKLLMMNOXzA" + "rB7hABQRcmzAzJ0amnuEghfpEOTUJKKLLVUVTUpzkuakfp5fGQuEfg0aEOfppzIJHIsCMWCM" + "wGmwabsC9jMWijQRHRHIIJzJpzfpfgghhiijajabbccmmwwG9j", + "JKUVdednno12abhi01bcEFijTUKLoppqOPIJJKajstmn3dcd2cno8945HIOXIS56akVWBCrB" + "LMWXhrKLBLzAvFFPOPOXNXENuEvwzJrBABqAJTyzoyopzJfpuDpzisklFP0amwcmJKuv67KL" + "78LVCDuDdnvFCDBCwGnxeo7h1b09FG4eFPhr6712IJMWJKxHwxlvrBak5fBLKLcdJKJTJKbl" + "1blvKLLMbluvMNENqAzAvwwG3423sC0avw56ABCM34lvMW89hi45blsCBLzA78bcyIcdvF56" + "deMNLMEOoyeoOP6gefghhiijFPdnvFoyuveooyMNeoyIyzkuefakxyabyzfgzAqAABqAAB_" + "012345689abdeghijkmnopqrtuvwxyzBCEFHIJKLMNOPQSTUVWX", + "hi78UVqrVWRSij23STaj67wxrsKLfg12TU893478lm45mn56UVPQtDLVdeQRIJOXnoabENef" + "DN1bkuOPPQlv09gh6gfpsCstktkllmmnnxxyyzpzpqgqghhrrBcmmwrs23EFABJK2cBLjtuv" + "tDpzwGRSQRcmmw67GQCD4eyILMRSKLzJLMpzopoyBCzAuvvwwxxyyzzAABBCCDvFvwJTlvwx" + "bl4556677hhiijajabbccd3dFP011bHI01MWxyCMGHCMMNENEFFGGHHIIJJKAKqAqrrs9jMW" + "12steohrbcoy455ffppqqrrsstjt9j09011223xylvHIpqxHdnTUGHnxVWOXOPFPFGGHHIIJ" + "JTTUUVVWdnMNLMBLvFlvblbccddeeooppqqrrBBLLMMNENhr89", + "gq45zAktOPSTOXvwrssCUVcdrBABqAwxpqrshrhioprB8ihiijjtstrsLMAKnoxH0956BLTU" + "67dekl2c12010aabcdpqJKdnISHRrBblbccmkt2cvF9jKLvwlvhrstqrJKMNSTHI3445wxDN" + "uEvFGHIJzARSPQop78pqqriseoENEOHI4eOP2334WX8ipzFP1bFGBLrBGQQRrsoy78jtajak" + "stghfgbluERSsCcmijBLeoOXeffgghxyhiijzJ3dLVBLyzlv9juEEFvFlvblabak1bSTCMqr" + "pqqr09rBBLLVoyno2cUVyIsCOPmnxy67PQ0awGVWmwwGGQakPQWXbcOPnxabakabkuuEEOuE" + "bckukt_01245689abcdefghijklmnoqrstuvwxzABCEFHIJLMOPQSTUVWX", + "45PQQR56vwRSnostajLMJK5frsSTisEFktENuDKLyzuvzAFGpzBL8ihi7hlmmn894eBCGHst" + "FGwGvwAB23IJJKoyHIIJ09EFcddeopfpef01no34zJmwmnnxxHGHBCrBrs45cdfpDNpztDMW" + "MNENlvisvFfpIJlvabEODNstktFPGQHRLVAKKLLMCMCDuDuEEFFGGHHIIJzJqAgqJTMWsCku" + "AB6gAKqANXEN67pqak56KU67JK45238i4eIJTUeo89nobcopMNEFpq12kunxqrhrzAisnomn" + "cmFGuEkusCCM78676ggqyzzAqAgq6g6778890901122ccmmnnoGHRSMNENEFFGGHxHxyyIIJ" + 
"JKKLuENXQRAKLVakPQ89787hrBhr7h7889090aakkuuEEOOPPQQRRSSTTUKUAK", + "FGGHrsuDefUVcduvENuEkuktstsCCMWXOXJKbcab5f01qAajVWakHIwxWXMWqrwGvwvFyzOP" + "gqfgfp12GQxyBCuvKLAKABcdzAisrsCMyIIJeoMN09efmwoyNXqrvwnxgh4eyILMtDDNcm8i" + "JTtDNXQRPQwxwGvFGHmwstklktfpTUKL34eoqA9j2cAKlvHIoyIJjtcmwGuDJKyIsCCDtDxy" + "IJGQmn45eoENvwnobl56KLlvvwwGgqEOrsopklstQRabGHLVHIajak45klVWMWnoMNKL0aIS" + "9jDNSTnxdnRStD34STJT3dstJKdn67HIKLxHrsnxqAHIuDabij6guEajakuDCDmnEOBCABBC" + "CDuDkuakajgqlmij_0123458abdefgijkmpqrstuvwxyzACDEFGHIJKLMNOPQRSTUVWX", + "ajHIGHij56klfgnomnBCghTUABopzJpq01efCDlmkt1bklBCABrBqrgqfgfppzmwxyyzdelm" + "hibllvjtnxghvFDNFPtDxyabqAkl0akthr3dAKMWstEFefzJdnNXgqfgfp7hUVcmRSVWPQCD" + "CMMNFGUVklLVEOENopyImwEFLMST8iFGwGghTUOPMWuEBLuDUVakhi45ISfgPQGQkunxwGis" + "mwakblzA4etDABrBcmcdeoSTyz2c121buE34hr2334oydn45rByInoLVab0aab4eOXhiopcm" + "mnMNpzijblxHBLhicmeffgHIghdeDNNXhiVWnxuDHRxHDNoyWXlvnxLVrBtDOX8idnjtdetD" + "MNrseoisoy8iisrs_01256789abcdefghijklmnopqrtvxyABCDEFGHIJKLMNOQRTUWX", + "IJpq23ktmwRSfpST45bcOXcdghabqreodednbccd12qAJKHI5fhifgGHIJzAakKLHIst0alv" + "abABktbcBCQRklakabgqlmku0axHnxEFWX3dMNtD9jENjtEOtDRS9jrBktsCBLMWJKKUTUIJ" + "CMis8i787hhrrBBCst34HRDNtDdnijaj1bLVblabajijisstktlv89FGvFvwisDNmwGHwGop" + "cmnxmnmwcmHIOPxHnxpq23xHktOXOPPQBLST9jGQwGmwrB6g2ccmdn9j898iisst2ctDmwwG" + "GQOPuEOPPQQRHRxHxyyzzAqApqopeoeffgghhrrBBLLVVWWXNXDNtDktkuuEAKfpzJKUpzfp" + "fgzJTUyI5f456goy6734564eABeoKUrBhryIoyyIABAKKUTUSTISyIoyeo4e4556677hhr", + "EF67RSyzghlmklmnpqpzzAABBCCDuDkuktstrsdelmklxyefktST5fstyzENzAfgfpcdwxQR" + "deeoTU34xyisrshryz12nomnmwwxxyMNOX0109KLAKABRSnxKU9j898idnklktjtajabbccm" + "zArBLM23yz12ghxyBLhrnxrBCM3duvUVkuefEFuEuvfgefeoDNtD4eyIoyeodednmnlmlvvF" + "FGGHakktjt1b23bllvxHhiHRghstNXDNNXopqAAK0109hizJ3dJTLMUVLVKLakpznx0ajtIS" + "PQEFakyImnENlmLMMWopkuMNtDAKsCCMfgDNLMQReooytDyIqAMNENuEkukllmmndndeeffg" + "gqqAAKKLIS3dEOEFFGGHHRQRPQIJHIwGvw23MW12uvIJkuak012ccm010aakkuuvvwmwcm2c" + "zJ", + "cdPQabQRfgOPkl67uDCDtDstGHSTghJKoyuvbczJdeJTefrsvw56yIhicduEENisCMajEFjt" + "MWabdekuRS89eoAKuEHIpzkubcgqghhr2c5fzJEOfp09uDVWPQIJuvOPHIWXqAFGuD0aoy5f" + "AK45vw89EN23wxISEFfgstabyI78oyFGakzApzvw09blCDyIfpEOQRCMUV89IS670a9jGHHR" + "BCbcfgSTJTmnMWdnkuuDmw5fzJuvIJRSpzGQuErsxHnxfpLMMNEOABstISCMMWIJzJwGGQku" + "zAab5f56mwIJxH5fsCQRcmfpisOPbcPQpzQRjtajHRij9j89HIgqtDisDNrssCBCIJ1bzJpz" + "fp5fBLrBtDBLab67LV45NX3423_" + "02345678abcdefghijlnopqrstuwyzCDEFGHJKMNOPQRSTWX", + "KL01bcWXJKdewxuEEFFGGQPQOPcdMNcmmwsCablvEN8iBCLVKLKUktklisbcnooyxyxHHIIJ" + "JKAKqAgqfgefdecdcm09akopHRsC7hlvCM78QRRSBLstISktIJ1bvF67MW01wGlmmwKUstwG" + "blwxMWVWLVTUlvzJfphrFP121bvFrBrssCBLFPvw23cm2cwx34jtisaj8iHIyIxyMNpzrBBL" + "abHRbc01cmqrijajUVaboyghzJeomnnxLVpqfgxHrsLMxylmHRAKRSMNkltD9jstwx4ersNX" + "uEhrSTDNtDktkuuEhiijajopgh7hJTpzuvDNakNXdepqqAefDNhideij9j09AK01uDhrkl3d" + "uvdnvwwxlmKLrBBLxymnrBhrmwwGmw_" + "01234789abcdefgijmnopqstuvwxyABCEFGHIJKLMNOPQRSUVWX", + "QRktghijbcCDBCabABefkl0astzAlmFGyzdeGHSTEFcmmndnENnxnooyDNtD7hhi8irssCjt" + "JK5fRSktklTUeoopfp4elvUV3dvFwxKLIJCDyIISoyxyFPOXkuuE2cstVWEOblMWWXNXHRwG" + "GHxHFGHRmwdeGHuvuDCDBCABzAyzyIHIGHwGlvDNBCrBqrpqfpefdecdbcblklkttD67wxrs" + "1bqAakghxyCDCMLMKLAKzAyzxywxvwuvfgjtKUGQJTUVdnKUEFlmwGpzno9jzJghAKpzisqr" + "fpopsCrskuGQFGGHxHisrsqrpqopnonxxHGHFGEFuEkuakajqAdn34gq8909015645343ddn" + "mnlmbl1b01098978678iisDNCDuDuvvFQRuDCDsCis8i78676ggqqAAKJKIJISRSQRPQFPvF" + "KUUVVWTUUVVWTU", + }; + + solutions["IBM q27 Montreal"] = { + "1:24:3:5:7:8:7:a:9b::c:e:df:e:g:i:j:i:l:km::n:p:o:p:q", + "gjcdpqlnbeilmpdefiegacjmcfbegjopno7aln47acilcd7a14cf471214_3bcdjmnq", + 
"cd7aac58be14cd47eg35237a8b1412beop47decdcf23fibe01ilmpaccfln_1578abdo", + "jkno35895889opeg017ampac58gjeg12jmde23cdmp35588b58acop7a4723_03579ekn", + "jmcdjkdebe678bmpeggjegbe8b7aac58jmmpcdpqde3589mp5835cdcfdefihi_3689bcjk", + "jkgjjm12231412lnilfi47eg14gjegac7ajkac47cfde7acdeg14ac7adebede12_" + "23cjkmnp", + "35jmgjmp8b23il7a58acbe358bcdeg7a58begjegjm8bmpjmgjopjmnode89mppqjm_" + "2378almp", + "pq2347cdeg14ln12noacopnoil7alngjdeegjmpqfiil47cffimp35cdgjcf58acjmdeopmp" + "8bbe_27cdelnq", + "be1223de89677aac14gjcdjkjmacdeeg01gj5847147aac6747egcffi7ailaccffiillnde" + "cdilcf_01269bgm", + "47ln7acd5835mpil018b58fi23cf671235acfiil7a23jmlncdnogj35deeggj23ac58cdbe" + "8b89beegde_0468bdnp", + "opfijkjmegcfcdmpbedegjopjmil8b58eggjbedenojk8bcd35ac7a588bbe35delnnocdln" + "cffiopmphi_38fgikmo", + "0189cfac147a478bacln7a14cdilacde1267nocfbeln7afiillnnoopegacgjcdcfacjmfi" + "de7acf4714cd01_0679fnoq", + "accdbeil47147a47cfac8bcd12pq7a14ficf12opno4758ac35cffilnillnacfi7acfac7a" + "cf471401fiillnnoln_124cdelq", + "12deacfi35cd58238b4735147ahiac6747cfdeegillnopficfbe12ac8b7agjillnjkegno" + "58ln47gj89accf01fi14jmcf35ac23_12345aceghilo", + "hi6789op2335be12no2314fi470167cf144714mp7a12illnhiegjmopmp14cdacdecfbegj" + "8b47bejmeggj14egbe018b5835588b_0234569bhlnop", + "89mpdecdeg8bcffidecdjmhibe14cfopgj47cdegdebe148bbede5889cdfijmcfil231214" + "accf7a234714fiil67hi122312_0123459acdefhilmpq", + "jk7a89gj23jmln47beacil01lnmp7ano12oplnilcffi01egno8bcfdeac58237a358bcd58" + "8bacilbe7aeg58477a35lnjmacnolnil_03479cegikmno", + "12beln01noil2312filnop358bde2358be35mp8beg58cfgjacilno8bhifi7alnnoopbe23" + "cf12ilcdjmmpegbede8bbe588bbeegcfjk_0148befhijlnp", + "gjjkhi14accd8begcfdeegcdfi0147deilpqnogjmpegacjkcf7aacgjopcfficfmplncdil" + "7a47decdde147a12filnac7a47no7acfop_148afhjklmnop", + "897apqdecdacegbe8blnnofiopnode47cdbemp35ln23128beg2312gjegjmdebe01jkcdil" + "cfmp14pqmpac477a4714acficddeeggjcdeg_01579abcdilmq", + "acbegjop5801358b5889jmcddeno234714hiegbe127a47de8bcd2314cffibeaccdcflnde" + "cd35eggjil7acfegfi23hi471412011247de23jk7a_0234589aefghp", + "7ajkilaceg352367cd8b35opgjpqcf47acdeegfi587anobe8bac14cfjmilficd58opln47" + "35benocf1214illnil01fi2335cfac58noop23no_2456789adfgklmpq", + "01ln8bjkbe23pqgjcfjmilopno7a67mpeggjln14jmilopbejkac7acdfimp35cfhideac7a" + "eg58il8bbe47opnoop7aeg89accdac14gjde7a01egdepq_02678fhiklmoq", + "67be358bdebe235812cfopegac8b35mpjmjkbegj89jm8b58op7a142347673514mp8901cd" + "no1458477alnbeegbeacopcf8bacbefieggjilegbelnil_01235689befmo", + "bejkjmlnfimpno8bde47op01125823897abecfpqaceg3567mpcd8b58gjbe89jmeg35de7a" + "jkjmillnficfacilficd477aac14cf7a47127a14ac017a477a_04689beikmnoq", + "67bejm4714cf01hi12mpdecdcf8bacln7abedecd47gjfiopac2335decf587aachi8b8914" + "no8bjmbe7afi4767il7aegmpjmdejkcfjmfiopilpqlnilno_012468abdefhlmop", + "acopbe67no588b7agjhimppqjmcd35debeegmpgjjmdefieg2358opnobegjac8b58cdaceg" + "be7ajkeg8b35mpjm8958lndegj35eg8bgjjkgjmpegdecfcd_023568aeghjkmnoq", + "cf14581201cdfiegac3514cf7afiilfibelncd47cffide238b7a58ileg8bop35noacjmln" + "2312mpfi01cffi7a14ac47op7anocdcf12illn4714fiilhifi47cf_0458bcdgijlnp", + "no7a678b58mpgj01il1247eg35bejmgj147aegjmmpopbeac23cdln8bjm7a58nogj4735mp" + "de6714ficfegbejmgj01decdfi8bmpilpq23eg58hicfbeegficf_012345678efgjklnop", + "fidelnegbe8bgj01127acdcfde01cdjkegde4735nocflngj23accffiegilcfopcddehipq" + "beacgj8bop7acd898begacjkbecddegjjm358bcdcf67ficfcdeg7a_" + "0145789begijklmnop", + "acpqcf017a89cdmp4758acjk12fi1435deilegjmno7a89gjegcf47cd14gj23op58mpfi8b" + "jm12cfjk3558be8beggjjmegfi01dempcdbede12opbeac588b35bedecd_" + "0234579cdefiklnq", + 
"ac6701hino7agj35jm23opln8bmpacileggj35benoeggjln58jk35no8b584712fipqbe23" + "cf7ailhigjegdeopficdbeilgjlnac35cf4714jm12mpgjegopbeeggj8bmp_" + "12456bceghjkmnop", + "op477anode14ilegfi8bgj67acbe5835jk8bcfeggj01ln7ailde23jm35mpacficf477ail" + "cd47acfi122335cf7acd1423deil58op35ln89noeg8b23be358bil58gjbe_" + "0134689begjklmpq", + "8bcd7ampacjm47be7aop124714fi4735debemp5835678b23gj12beegjmjkbe8bgjjm588b" + "beegbe8b3501588b14be47357aegdecdac47de14cffibe018b89il8bbelnde_" + "013678abcdefikop", + "8blnnoil47ac58eggjdelnbefi8b3523cdde35il58becdcfeg8bcdde14begj7a89cdac58" + "egjmmpgj8bfi35pq23jm127acffi584789ac7aac6723cfilficfillnnoopnoil_" + "0124589abcgjklno", + "ac5847be7a12deop8b14mp3558cd23egil12be478bcfno35fi140147decdbecf8bpqopln" + "89eg8b23deacilgjbeeg8bdecd14677aacfi47gjnojm14cflnmphifiopiljk7aacjmmpop" + "jkln58finocffihi", + "jk8bop89mpaccd12237adeac7a477a01cd12beegpqildegjln35jk8baccdegfidecdacil" + "7afi471447cf6723ac35017aac47cffi1447583512il8blnbeeggjegbeno8bjmmppqmpjm" + "_0146789cdegklnoq", + "7aac47cf8914pqln7amp01jm58finocdac35cfbe47cdilopfilndeil7ampgjjm148bhiac" + "ficfcdcfegfiln233547be7adeno588blnbeac12cdeg35584723gj148bjmac12gj476747" + "3523_012479dehilnq", + "8b5835hi12jmopcfcdmpfipq14iloplnde4723gj01cfficfnoacil7acdde12egbeacgjde" + "237ajk14fijmjkcfac677a3547lnfi01148b14477aaccddebe8b583523illn_" + "0123456789abcdefghijklmnopq", + "12jmlnil897anodecd8b58be478b14aclncdhide89mp8b357ajmgj58bepq8bdejmegbe8b" + "12cd58fiaccfnoacopegmp47233523finoilln7ail471214gjfijk1214noacopcfacgjpq" + "cfno_234579acdhilmopq", + "jmhi89becf67fi12mpgjjmcdcfde7aaccdgjopilde14mpfiegbe8bpqgjbeegjkln7agjjk" + "ilegbeopcf4712no1423ln01jm8b584735gjeg8bbefieghi58238bilfigjcf5835lnac7a" + "jm67_24689bcdefhkmopq", + "1223acgjlndecdjmmpde35ilnobehi14opln8bgjfi12hi23iljk477a47deaccfficd7acf" + "debeaceg1489fiilgj8b677a8901pq47bejmmpeg1258bepq8bjk5835be23eggjegjmmpjm" + "_1235689abcdefghkmnopq", + "pq478bgjcfac14cffi7a35ilcffijkhiaccfmpeg23477agj47jmfibe1435hi238bln4767" + "becdac58cfjkmpegilde8bfilnbe8bac7a58no47il14lnilcd1214ac477aaccf67acno7a" + "acfi_124568afghijklmnoq", + "1423op01mphifinocfilac7afi47cfgjacfiopde587aeglngj35il1447be8914nocf1223" + "8bfijmcfaclnmpjk58cd7a8b67cfopgjnopqcddeeggjjmmpopnolnilfiaccdacdepqbeeg" + "jmgjjmeg7abe67cd8bac89cd", + "12egnojm47opac7acd47beac8b8901mp23degjcffiegjmbe147a477a14358b12accfmp7a" + "ac1447fide7abe67hiilacficdgjjmpq8baccfgj7alnaccd14eggj1223fijkde7a47il12" + "gj1447_0134689acdefgilmno", + "acfihi7acfjmcdmp23fi4712ac67ilgjlnjmpqde147abe89egcd35de8bopmp23cfaceg47" + "cdcfjmficf677agj478914accf7abede35fi47egjmilfi58becfaccf2312140147lnfi7a" + "no_135679abcefghijklmnopq", + "de471412147agj47no14ac017abecf89cdilde35acfi58cf357a23ac3547opjmmpfigj8b" + "7aacbecd1447cfeg147agjiljmfi12lnmpopcfjkac7acf677afiilficfac8b58cf897afi" + "hificf_0234679abcdejlmnpq", + "2335fiac14jmcf12jkegmpil7aopjmgj47ac14bempde8bcdcfficfbecdlndebe89cdacfi" + "5823no7a471412il2335588bfibeegac89decdac147ade2347cf67ac14egcffi12higj01" + "jk12gj23_1234579bcdegijklnp", + "acdejmcffimp7ajm67eghijkfiac7a01gj8947ac7acf478b58fibepqde358bbeilcddebe" + "fi14cf5847lnnofiacopnoop8begilfi7acf47deac891412cd7a4714deeg7a12accf23fi" + "127a477a_034569cdfhijklmnpq", + "cfnodeficdjm8beg674758dempcdcfbegjjm14opmp01cdfiegbe8bdehiil357a58jkln12" + "47gjfibede148b122389cdcfac7afiachicdbeilpqfijm47de12cdcffiil14mp12gjegbe" + "gjjmgjeg8bbeeggjjmmpacpq7a8b", + "fiegbe8b01opac127a677ajmilcf23mpgj58eglncdnofi35acdejmop7ahibecd47pq7acf" + "mpjm14pqcdgjegjkde8bcdacjmcffimpilcf01cdbecffiillnnoopmpjmgjegde7a478b67" 
+ "_0123456789abcdefghijklmnopq", + "8b58befieggjhilnjm35128bmpilde588923eg14gjbe124767jm35egjk58pqjmopnobe8b" + "2335bede58gjcdjmgj8914122301acfidebempil128b89pq8bbedecd35decfegdecddebe" + "8b588bbede_23589abcdefghjmnpq", + "pqbe8bilfi677agjln58cfacmpcfdebeilfijm47eg14gjcfjk7aacdegj12cddeno35hi8b" + "egbe8b5835pqegcf4789gj14017alnopaccdde8beggjjmgjegbe8b89ilfidecdcffiacil" + "7a47ln14_13456789abcdehijklmnoq", + "ilopmpcfde35egac7a2358ln3558nocd8bgj4714be12iljmlncf89opficfhiegacpq8b01" + "23gj7aaccfil58debe35ficfac7aeggj47jk14ln01beno127acdac7a677acdacac7a4714" + "122335588bbedeeg89mpgjjmgjmpegde", + "mpopjm23mp14ficf01587aegilcdlnnoopgjdeacfiilbeegcd8b67begjcf7aacde8b47cd" + "cfjm35gj12fieg1458be7a4789ac1223cfhifi12ln14cfpqcdnodecdac7a471412233558" + "8bopnoeglnilacfipqcffiacgjjk89hi", + "23be1447eg8b5835fi12nocdacdegjegcdbejm7a23ac8b7ail583567opgjlncffidebe7a" + "egac8bilmpcffigj58jm89lnmp7aopno01cdopbejkcfilpq471435ln8bopnoln01il14fi" + "4767cfbecdde_012456789bcdfgijlmnop", + "jmcd7adegj47egbeil58ac7aaccfcd12ac146723jk47mp127aacpqjmcfmpfiopjm14eggj" + "3547mpjmcfgj2301eg12jk1423mpbe35il588bbenoeg478914gj67jkgjlneghidecdcfno" + "be01lnfi8bbe_0124678abcdefghjklmop", + "hiacjkgj8bopcd7a5847debe14noac8b7a89ileg4701be2314pq6747cdac12mplndeficd" + "14jkil358bgj12dejm23opcf47mpjmacgjegbe7aac583547fiilfieg141214gj01jkgjeg" + "lnde8bbedebe_0123456789abceijklmopq", + "89jmegmppqgj35fiegbeiljm12mpln47148bhiaccfdenofigjcd23eg35477ailcfachiln" + "jkbeegil67cf018bjm7apqacgj1223egfiildebe58cdac14jk7a35ln8b23bemp0147deeg" + "89gjeg140114de_0125679abcdehiklmnopq", + "12cdcf89hi8bfibe8bdeac5823egcd358b12jmbede14op7agjlnjmmp47cfbe58acil1401" + "nopq8bfi12opcfil89be7aeglnno2335ac23pqfiillnfi7anocfilfioppq471447il7aop" + "accf7a67decd7aaccdde_0134579acdehmnpq", + "67cfaccdde7acf8912fi47opegcdgjnojmbeegmpac8bcfcdop5814gj897a0147jmde23mp" + "acbe12cddeegfipqacgjilfiln8bjm89jk678bbe8b7a35582347eg358bbe23148b47588b" + "7agj471412jk23eg35_234689cdefgijkmnoq", + "gjeg47mp14jm23behilngj35jk7a8b67egbeac8bcd588b0112decdde47beilcfmp8bfi14" + "opeg47pqhi2335898bbeegmpgjjmgjilegdecddebe8bmp5835238b126701beeggjjmgjeg" + "be23358b583523_0123456789bceghjklnopq", + "8bbe12opcdjm58il67ac358b5801egcf14de23becd8b7alnpqno89fi1223egde3547lnbe" + "8bcfgjopegdecdbeacjmjkgjnoegilcddempegjmlnfi7acdcfpqmpjmfiacgjegilcfhifi" + "cfbeacil8b58lnno_013456789bcdefgiklmpq", + "14mpbe7ano8bde58gjficf4723cdopegdebe8b35lncdacnojmil67mp478912lngjpqfimp" + "pqop14jkegbejmgjeg477amp8b58ilgjjmopbe8bcd23mpdegjcd675835be23opeggjnomp" + "jmgjmpcfpqfimpegbeilficf_2456789abdegijlmoq", + "cf7aficdil12ac14471214mp23gjficf89deeg12pqfi35ilno1447acln58be8923122301" + "7aopaccdilgjcfficf351447acil147aacnoopnohicfficfpq67cdjmilgj7alnacdecdcf" + "deilficfac7aeggjjkjm_023456789abdefhijlmopq", + "be23cd12de35cdmpjm8b14opde01accfeg2358il47ac14depq7aac12gj4735no147acfac" + "cdfideop23aceggjmp7ajm47begj67hifiopln8b12nocf0112ln4758opfi3523decd8935" + "decfilfiilfibe8bbecf58_0123578bcefghijlmnopq", + "gj35cffi8b58hiegopiljkbe3547cd7acffide8bbegj23aclnilcdde67eglngjfijm7a12" + "mp23depqgj35cfacficddeegdedebe8b5835231214477aac8912ilno2335lnilgj8bbede" + "debe8b5835231214477aac0114fi01jmcf89mpfi4767", + "cdjknoiloplncffideilgjnobehiac23cd58jmdebe01mp1447ln357aopno2347accf8b7a" + "ac67897acdfi5812cfacpq473514hi23cd581214ilficf47ac89fiil67ln7a4714011447" + "beeggjeg7ajmbemppqmpjm_0245689abcdefghjklnopq", + "58ac7aeg47cf8bopjmmpjkgjbefidecdcfac355814jm1223mpilfilnno7a8b47beopeggj" + "35de1467jmcdpqilcfachi7a67mp47lnfiaccdegac7a4714122335588bbedecfcdjm01eg" + 
"debe8935122335588bbegj12eg89degjcdaccffiilhiln", + "acgj8b7acdcf47lnnoilop58be358b23egac8901jkfilnpqcfgjnodecd14ilaclnegfide" + "cf7aac7ajmhi8bcfbe58cdfiopac477apq8bacnoil58lncd67denocfegac7aac47cf1412" + "234712figjjmhiegbe8bbeeg_03479abdefghijklmnopq", + "jklnilac7a12bejmficfillndeac4714hi47nompegln01gj7ail58148b47bejm01egln58" + "ac8bnoopfi35cfacgj7a23fi89ac58becdegde35gjcdhi14no67acln8b127a01ac1467no" + "58cddeeggjeg35jkde588bcdac_0123567abcdefghkmnopq", + "opcdbeaceg7alnbe6723degjeg8b35bejmdepqcdgjjkac12mppq237adecf35588bbeeg89" + "decddebe35accf7afi4714017ahiacficf4714122335588bbedecdac7adefiilgj352314" + "acficfegdecddebebedecdac7a471412233558fi01ilgjln", + "89il35fi67benoopjmeg23cf1458acmpgjeg7ajkcdgj8bacbe7a3558ln47jmno14hi8b35" + "be89deeg01beln12fiillncd8bbeopdecdeg2335noopcfpqcddejklnfi14be588bbeeghi" + "477agjjmilgjeghide01cdmpcfil_0123569abcdefhijklmnp", + "jmilln1423be4712de3514no588b23cdbe35cfegcd01gjfi89deeg8bcdgj7aac7acf8967" + "fiilfi1223cfln12cdnobe7adecdaccd7aop67pqmppqdeegjmjkgjeg47be8b7a5835decd" + "ac7a4714122335588bcffi_0123456789abcdefghijklmnopq", + "no8bacmp23017ail6735ln12becf898bac23cdfi35op7a015835cfhicddejmilgjbeno12" + "47acfilnegcd8bopjk14begjnoegmpgjopdejmjkgjegdecdcfnopqfiac7aac8b583558cd" + "8bmpdeeggj47jmilgjegdehicdac_023456789abcdhiklmnop", + "01no14lnbe8bcdopfiilde12ac8947gjbejm7acdeg47be8bdehibegjegcfmp8bopacfiln" + "de89gjnojmcf14fipq58358bjklnmpcd7aacbe23egjm477agj67egde14behijkcdac128b" + "588bbede14be8b58352347141214_0345689abcdfghijklmnoq", + "ln8b125823cdbe35noopcffiil47pqaclncffi8bil7a47gj14eglncdde01benocdgjcf8b" + "47jk89ac7aln47fi588bmpcfopeghifigjegaccffibe8bilfi14lnno5835cdlneggjcffi" + "illnnoopmpjmgjegde2312ac_0123456789abcdefghijklmnopq", + "accdhi0135opildegjjmcfpqbe7aac14eggj237a8bbe12cd5835mp47jm8bcf23no5835de" + "cdop89nompficf7aachificfcddeeg7aln14gj01jk14678bdecdac7aaccddebe8begpqgj" + "jm47jkjmdecdac7aaccddeegbe8bbe_02345789bcdefghjklmnpq", + "7a47jk35cfgjfiaccdcf12il14fihi8b7a58dejm47cd7a23acdeeg890135cf14befi1223" + "7a8bmpgj47cf7adeilcdcfde58acbecfoplneggjiljmficfmpac7a8bjmbe47ilgjlneggj" + "jm8b58no35ln14476758mpil8b1247be23_02345789abcdehijklmop", + "opegjk35acbecd7amp14de8bcdacnoln1247opcdpqbejmgjegno8bbe897aop8b14bede58" + "mpcd8bcfjmgjac23cdgjegdecdcffiillnnoopmp7abehicfgjcd47fiillnnoopmpjmgjeg" + "decdgj35jmbehiilcfcdde8blnnoopmpjmgjegdecdcffi018914016747", + "egbe8blnjmcfmp58gj35ac23finoegcfjkfigjcdde12opjmbe8bde8958illncf35hi23fi" + "12cfno4701mp7a1467ac12opcdde7abe8begil47gjbe8blnpq58358bbefiegjmjk2367gj" + "noopcfnoac7aln47be8b5835231214477aaccffiillnnoopmpjmgj89hi01", + "figjcd8bbe89acegil7acffi47ac14lngj7a4758nodeillnac8bcdop14cfno017a35jm14" + "ac7a4714begjpqmpeggjbejmdepq127a588begjkjmmpnoopmpjmgjegdecdcffiilaccffi" + "ac7ajk67143523ilnoln47no14122335_0123456789abcdefghijklmnopq", + "acopfimpcd14de1235eg7a67opcfac7agj47cdcfdejmfihiil14egacnocfop01cdmpdefi" + "14gj23cfacopbe7a47ilegcdgj14accd7aac01bede8b89ln58cdjmgjeg8bbeaceggjjk8b" + "nompgjopno58egbe8bbeopeg5835mp23gj3558_01234678bcdegijklmnopq", + "be8beggjdecdmpeg14be3501cfjmfijkmpgjjm58pq8bnohiopmpjmgjegnobepqil233558" + "8bbedecdac7a4714be5823ac7a4714122335588bbedecfcdfibe8958352312ac7a471412" + "2335588bbede89il7acfaclncf7ailfiil_0123456789abcdefghijklmnopq", + "deno6747egjm7agjac142312cfcddebefi4714jkdecf7acddebe8bhiil47fi58cfmp35eg" + "hi23be8beg58bedefiln35il2312cd01fijm14mpcfgjac477aacopmpcffiilnoln01cfcd" + "deeggjjmmpopnolnilbe478b8923583523_0123456789abcdefghijklmnopq", + "47cdfidecffi14egilaccdlndefiop12cfcddegj7ajkfiacpqbe477a8bcfegdegjhijkfi" 
+ "cf14acjmcddebeegbe8b587agj8beg35dejmac47237aacmpilcfjmficf67cdde12benode" + "hicd8bcffilnilfi89587a35cfno01accdde89eggj237ajm14124714014767mppq", + "debe23128b7acd89gjmplncf5835eg47figjjmjkbe14hicd23decd8bgjbeeg58cfdefi35" + "8bcdcffi23deachiilgjjm7a4714egmpcfjmop01gjegficf12ilbe58ac89lnno23587a47" + "opnocf14fiil3523588bfi125814cfaccf47fiillnilficf_0123456789abcefghijknp", + "147aaccdcfdecdde477abe12ac148bjmjkdecdcf47de23fi126701cf14ilmpjmlngjcd89" + "egjmdecd477abe8bbe8b5835231214477aaccffiillnnoopmpjmgj8bdecfcddecfbefieg" + "gj12cfjmac23pqopno7ahilnil4735231214477aaccffiillnnoopmpjmgjegbe8b89pq0" + "1", + "egde58gj35cdbehicffideacbe8bjkcdegdeiljmlnegcd47897agjbenoac67opegcfdemp" + "4714fiac7a23il58aclncdcfcdde12cf47fiil35be8bjmln01588bgjnolnopcfil14ac47" + "7abeegacbecddegj23jm47mp8bjmgj12egdecdcf14fiil58ln01no_" + "0234568abcdefhjklmnop", + "cfeg23jmmpjk1412ac357a234714beegaccdnogjjmac7aeglnbe8b47defino01egaccfde" + "ficdgjacopbe14de7a5867cdil35lncffijmcf89ac23mpcf7a1247ilfihinocfpqcd35de" + "jmegacdegjopcdjmacdeno7aeggjjkgjegaccddeegcdgjjmgjaceg7abe_" + "01234678abcgijklop", + "beeggj8b7a58opacbejkmpcf478b3514eglndecdgjbe7aegac0158cffiil67de7acf47cd" + "ac89jmgjpqmp3514jmnogjeg6747debe7aopmpnocdde14122335588bbedecdac7a352314" + "cf47ficf7acddebeac7a4714122335588bbede89pq01cffiil_" + "0123456789abcdefghijklmnopq", + "pq588bcffi35gjjmbeacegnohiopcd58cf8b7afiilbegjeg8bde12lnmpcdac47cf14nofi" + "pqdebe01de89jmcd58ac7acfilmpjm8b4714cdopde67lnbe01decdcf35jkfiaccfacilln" + "nofi7a23jmmpcf4712ac7a14cf35jmopfi6712accddeeggjjkegdecdac_" + "023456789acfghijklmnoq", + "gj1223cd58jkde01be47eggjbecf14jmmp8begbecddecd89ac7acfac8bbedeficfac7acd" + "ilfi47cf7aacopnolnilficfcddeeggjjm14mpegbe7a8b5867013512147aacfi8bbeeggj" + "jmmpopnolnilficfac7a471412233567jkpqfioppqilnolnnoilfi_" + "0123456789abcdefghijklmnopq", + "3512141223beildecd8bcfgjjmegacbedegj8bmpegficfnoln5801128baccd7a47opjmpq" + "mpfiilcffi35hi67cfdebe8bgjjmfi58accffi23ilcdeg7anoln14deno47beac35898bbe" + "cdac7aeg58lngjiljk8b67hideeggjde12mpbeegpqmp14jmbe018b583558gj_" + "023456789abcdeghklmopq", + "bede8bcdmpeg14cffipqbegjde89cdilcffiegln8bbedecd588b12bede8bcd23jkaccfac" + "7afihijmil35aclncd35588bbedecdac7a471412gjegbeficf01no7a8bac7acf67fi5835" + "7aaccddecdac7a4714122335588b7a01lnacilcfegficflngjopjkacno7a67op477aaccf" + "filnilficflnac7a47", + "fimpgjcd58hicfjmdecdaceg7a4735gjbede1489cd58ac7afiegde23cd35egjkgjeg8bbe" + "58eggjopmpjmgj8begbedehi8bbedecdac7a4714122335147a58ilcddeeggjopmpcffiil" + "lnnoopmpjmgjegdeaccf7a47146712233547144712233558ac7a470114477a0189ac_" + "0123456789abcdefghijklmnopq", + "gj4714jmfibemp1258il7agj8baccddeegbe67eg7a67gjcf23355889jmmpopfimplncdgj" + "47nodeegcfac7agjhifi47jkgj144712il23cf588bbedecdac7a4714122347egcd7afide" + "ileggjjmgjegbe5835ln23cdac7a47cf140114477a122335588bbeeggjjmmpopnolnilfi" + "cfac7a47pqoppqnolncfilfiillnnocf", + }; + + solutions["IBM q7 Jakarta"] = { + "1:23::5:5:6", + "563501_06", + "131245_345", + "12350145_23", + "123545_0123", + "13354556_14", + "45351213_14", + "56453513_45", + "120113455635", + "45130112_015", + "45561235_1345", + "01563545_01236", + "13124501_02356", + "3556351345_346", + "45355635131201", + "56134535130112", + "124513355613_24", + "1335121301_0235", + "1345355635_0156", + "0113125635_12356", + "1312354556130135", + "350113014556_045", + "351301563545_013", + "3545135612_13456", + "4556133513120156", + "01134535451301_04", + "013512133556_0235", + "013513351245_0125", + "013556453513_1345", + "124513355613_0234", + "12563513350156_26", + 
"130135133545_0145", + "13351256133545_12", + "564535134501_2456", + "121335014513563545", + "12133513121345_235", + "13013556133512_012", + "133513561201_12356", + "13563513451201_016", + "135645563501_01346", + "351312560135134501", + "35561213123556_256", + "355612133545_01456", + "4501133556_0123456", + "564501131235_01346", + "12561335130145_0126", + "35451256011335_2356", + "56011335121301_0356", + "56123545130113_2356", + "56351301453545_0146", + "01131235451356123556", + "01133545121356_02345", + "12354513354513_23456", + "12564501133513_01236", + "1301351335563513_015", + "13123501451335561345", + "135601351356_0123456", + "13560145351345_03456", + "3513124535130113_045", + "35561235453545_12345", + "35563513121335_13456", + "56013512134535_12346", + "56351245130135133545", + "1312451335561301_1245", + "3513350156451335_0145", + "3513563501133512_1356", + "3556451335131213_1345", + "4513351256131235_0124", + "12014513354501_0123456", + "13123545135635_0123456", + "13350145133513_0123456", + "1345351213350145_12345", + "3501451335561312351335", + "351335120113355635_015", + "56351312453545_0123456", + "121335015635134512_0246", + "131235451335563501133545", + "1312453501133501_0123456", + "354513350145135635131256", + "3545355601133556_0123456", + "13354501131235560135_0124", + "45351301135635131213_1456", + "01131256351335124535_01236", + "13121356351345350113354501", + "0113354556131235130135_02346", + "0135134535561301351213123545", + "0156131235134535130113_02346", + "13354556133512131235_0123456", + "1335560145131235134535_01245", + "011335124513351356011335_03456", + "560135131245351356351312_02346", + "12133545560113351201130113_02356", + "35451335134512133556351312_12345", + }; + + solutions["Reduced IBM Manhattan q65 (only q51)"] = { + "1a:2:3:4:5b:6:7:8:9c::d:h:l:e:f:go:h:i:j:kp:l:m:n:q:t:x:B:sC:t:u:v:wD:x:" + "y:z:AE:B::F:J:N:G:H:I:J:K:L:M:N:O:P", + "rCjpFGrsGHpxHI8cstwxot78tu67uvfo5645vDvwefuvdead0a01_cjoCF", + "zEmnENnqABqByzxyzAABwxyzxyMNzAzEvwwxuvvwvDLMtuKLotJKfo_mzBEN", + "4bbhhi1223ijIJ454b01bhjphi344556DJpx1223344bijjpbhhiijjppxxypxjpyzzA_" + "0145I", + "fgmn67ghnqIJqBDJABhivD78IJij8czAzEuvcltujpotklfoENMNefdepxxyyzzEENNOOP_" + "6fmIJ", + "klrC89MNjkrs8cENzEcljpzAstpxtuuvklvwwxjkABpxjpjkkllmpxxyyzzEvDDJIJHIGH_" + "9lwCM", + "8cwx23clmnzAklABqB34nqlm4bbhjkmnvwjppxwxghuvklfgfojklmotstvwrsrCtuvDCFDJ" + "jpFG_28nxz", + "efqBfg45nqmnvDghhi4bbhijzAvwwxzElmhixykljkijklhibhjp4bpxxyyz3423xylm12mn" + "zEENzEMNnqqB_5eABD", + "ad45OPde8c4bclefkljkbhfghighNOjpijpxwxfojphivwMNfgLMKLJKijDJjkvDuvghhikl" + "ijlmjkklcl8c89_58aoP", + "010axyaddeclwxvwyzefuvfoxytu454botwxfostkljkvwefbhrsghdejpfgadpxfoxy0ayz" + "zEotENtuuvvDuvDJIJ_15cyz", + "8cxypxjkrCcljpklcljkstkl8czE89otENGHrsadclstlmkltuOPHIdeuvjkvwwxNOENijhi" + "ghefjkfgOPghfozEpx8cjphiotjkstrsijkljppxlm_8ajmsyzCGP", + "JKIJ01qByzHIijxyhiwxjknqABghzArCrsijothi1223zEENfgfoGHtu34bh4bstklbhvwjk" + "hitujpuvtuvwwxijstxypxjpyzrsrCzApxABCFxyyzpx_0jkloqzBCK", + "ENGHFGotfoJKtuDJbhLMvDCFghijfgzErCfoNOMNjpLMrspxstrsrCMNzAvwotxyKLJKfoIJ" + "efotwxABdeqBadHIpxjpGHjk0aklclnq01mnlm8c89cl8c_3bituHKLNO", + "klvwbh78uvwxhipx67cldetuijvDjp8cvwefot780aclst8cklfgjkjpkladclrsrCghpxkl" + "8cwxpxjpvw786778jkklcljkdeeffootfostrsrCCFrCFG_067bdkovwD", + "DJ89mnjpvD8cpxjkGHxyuvklijlmrstujkkljppxclwxjkhiijABklotvwyzstwxghfgxyzE" + "vDhifootstghotDJfoyzIJqBzEnqfgENfozEotstrsstotmnforClmCFcl8c78_" + "9jklnruAGJ", + "IJ67DJjp5645lmCFxyvDefNO56EN4bzE67uvfgklzAjkpxyz78bhrCtuijghothizEfoeffg" + "rsst8cABdeclotqBfofglmnqghbhmn4bnqadxyfgefyzdeqBzEfgENMNzELMot_" + 
"47efjmxFIO", + "yzGHzAad4bhiijdexyFGwxCFclefjpABpxyzhivwbh454bxyghwxvDbhrCfgzAyzghfoxyfg" + "yzwxrsotghvwwxfoDJtuIJvDotuvHIkljkijsthiotforsrCefCFijdeFGad0a0112_" + "45achizABH", + "CFrCstothi0abhzEgh01yz34rs124bijbhfo4bef45zAfgdexywxstghfomnfgjppx23lmvw" + "5634yzuvtuotuvfostvwclxypxjpwxyzrsotxytu8cij677867rCuvhi5689bhyzzAABvDDJ" + "yz4bqB344bnqJK23_3abghnsAEF", + "JKvwvDNOlmDJyzxyjkpxKLvDxyzEJKyzijFGzAENxyzEvw89kljkjphibhijhiwxpxjpOPbh" + "4bvwNObhijwx34jpENpxuvwxvwwxxyvDhiyzDJIJzEMNDJtuotLMKLxyJKIJHIIJpxJKKLjp" + "ijxybh4bbh4556_9bckmwxzAEFKLOP", + "GH23FGdeklbh8c78LMefxyfg45wx4b34vwmnhibh4bjkKLlmjpijJKghCFDJpxhifgklxyfo" + "jpjkrCbhuvghfgottupx67otyzxyzErsyzfojpstuvijotjkvDDJ56tufopxuvIJENwxNOef" + "45devDklfo34clDJOP2312zEEN_25bcdlnyHM", + "12vDENmnclDJlmJKgh23ijuvnqOPKLjpzEbh34MN784btuyzLM8cMNclpxbhNOwxMNmnkl12" + "hiotij23ghKLvwqBfgeffonqvD01JKOPIJHIGHLMjppxfgxyyzDJghKLIJJKFGwxzEENxy12" + "34HI23IJwxNOGHbh4bCF45HI_1347cgijmuvDENP", + "qBnqjkij12foABNOmnqBMNzAlm45zEkl34OPLMhi23AB56xy45KL34otstzAghwxvwdejkJK" + "efijNOfgEN67klyzzEhibh78foghhi8cclMNvDxyIJyzlmxyLM4b8czEDJKL34EN4b78NOIJ" + "OP23mnpxHI12IJjpijfghibhijfootstforsrCst_134bdfiknqwyBOP", + "zAlm1278adtu67uvsthivDtukl23NOjkfgbhghCF4buvrsEN8c344bjp89DJhiijxypxjp78" + "styzrCzEwxJKrsotxyclvDjkvwwxDJKLLMvD23vwkl12fojkfgstjpgh8cotklclJKpxwxjp" + "vwfovDlmbh4bDJyzvDxyvw45fgbhmnnqghqB56ABhizAzE_167cdfimrstyAFO", + "fo23mn8chiefijlmgh4bde56qBOPefNOfokl344bclzAot45stEN34zEjpABpxzAbhrs23jk" + "89klqBAB7867nqjphijkvwwx56pxjpvwvDjkijqB45mnwx4bnqkllm8945mnpxclvwjpwxbh" + "DJ8cpx89vD8cxyghIJfgfoghlmmnvwyzotzEENvDNODJIJyzvDHIIJGH_" + "2689bcdhinoqvzP", + "GHlm12fg23foJKFG89OPghefNO8cclhijpijlmjprC8cotDJtufostyzotuvENMNefvwstCF" + "rs78zEstENtuuvzAzEAByzfo34vwfgforCwxrsqB4bbhrCstpx67hiijot56hi45jpNOpx34" + "jkfoklclxy23yzghfgOPbhfonq4bzEghmnENlmcl8c34bhmn89cl124bvwMN_" + "19efgmopsyCHKMP", + "hiijJKzA2356gh34fg4b8cjkhi45yznqkllmghbhmnjpDJKLFGJKijclvDnqxyklvwwxOPot" + "hilmpx34GH23jpHIfofgjkxypxyzijvwNOkljp12uvjkxybhij4bzAyzbhDJvDghENzEtucl" + "34bh4b45uvbh01pxotyzxyhituotfoijghotefdeadfgpxef0adejpjk01kl0acl8ccl89_" + "268fghkpqtAFKLP", + "564bot0aENrCABvwCFstjpfozAwxyzrs67fgrCzEpxjkijtu45bhwxnqxystMNghadvwbhde" + "4bjpmnhivD78DJwxLM8cvwKLot45efJKclfoefyzotlmbhfguvfoijdetuklmnhiijvwuv56" + "rsot4bbhvD45jpIJpxHIxywxDJtu4bstjkIJvw34tuijyzuvhixyyzzAghABfgtuotefyzfo" + "deaddeef_0456jkqstvBCEFN", + "8cfgghCFNOOPrCKLENLM0112FG0ayzNOrshi4badzEIJzADJbhENHIdezEefIJstotABclfo" + "uvvDghijDJvw23otfgjpfotugh34zAvDMNwxENklGHmnvwIJqBHIzEpxotjpxyABjkuvwxnq" + "bhlmtuvwzAqBmncluv4bvDjpnqABpxqBklcl458cIJJKcl4byzlmclzAxyDJ8c5689mn8cKL" + "clnqJKlmLMMNyzNOOP_0148fnuzCFHIKOP", + "ad8cotxy78clGHvDhighpxfg34xywxDJvwjpHIuvzEFGGHijIJpxhiHIfotuOPJKjpKLyzuv" + "LMMNbh8cghABwxijfgENxywxvwzENOhi894bpxuvENtuzAyzxyABwxzAqBzEyzefIJxypxde" + "jpijhighpxotMNbh45fgnqyzefghclfoAB4bdezAstyzJKklKLotcl8cNOstefxyadqBIJyz" + "pxrsbhrCstABzE45CF56deLM4567yzMNNO4bMNLMOPcllmmnclnqqBABqBnqmn_" + "4789aefghijoptuvwyzBDEFGLP", + "pxnqlmDJ34jp8cHIfgotvwIJmnqBefhigh23CFFGGHijhibhuvkl4bOP34ijtu67JKrCstfg" + "hiKLCFbhrsvDvwrC4bstjprsDJot45cllmghijxyHIfgNOJKnqfoMNotvD4bhijk56wxDJst" + "jpIJefbh4botghpxvwjpfofgLMHIvDijwxotghKLdejpvwwxpxjpad0auvhiijbh4b34bhJK" + "23st34hiijrsDJbhJKjk4b45vDuvbhkltu34otfoeffoot231223tu01340auv01_" + "23578befimpqstwxBCDFGIJKLP", + "ABDJ56clxyyz45ijjppxENfohi0azEijmnxyfgJKfojppxlmkltughKLxyMNbhjkLMyzKLEN" + "78NOefIJxy8cvDjpvwclDJ4bpxzEfglm34otENzAyzefmn01hi12wxOPzEEN23deuvpxij34" + "nqABqBghjppxxy23wxvwwxhiNO45stadABnqbhvD56mnzAENzErs67ENyzxyrCzEwxNOvwij" + 
"OPwxghpx78jk4bfgABlm89ENuv34clkl8cjkjpzEclijpxghlmqBfonqtu23rs78st120134" + "wx67hivDotmnlmcl8c78cllm_67abceghjnoprtyzBDGIJLMNOP", + "0auvijklwxvD45DJvwmnxy34tu01otwxuvvwyzstfo4bvDMNot89pxzE23rsxyyzwxzAst12" + "JK8cGHhiENbhDJAB23tu0178stjkxy0aklFGjp34IJHIpxvwfgCFjpuvrsJK67GHrCwxzEcl" + "jkFGghbhklfoKL8c4bcl5645NOijpx34jprs8cpxeflm8923MNjkLMfgvwkllmjpyzJKIJxy" + "12wxfopxjpyzxyzE8cotyzHIJKxy45tuENwxvwjkKLvDvwwxJKNOpxOPijjpIJpxwxvwklhi" + "ghuvjkclJKvDtufgzEENDJJKKLDJ_359achilnprstuvxyzDEHJKLMO", + "nqGH01zAIJadqB34bhzE12mnAB89wxOPDJhi4blm0aghnqjkFG4556jpLM45ENrCvDmnMNde" + "xyijyzzAqBnqpxfg78efvwfoxyyzrsCFklottuzEENjpstijhipxghxyefrCaddersJKyzIJ" + "jkHIijNOuvlmfgbhjptuABef4bwxpxfohixyvwstij23otjpuvfgtuGH67453456ghzAjkzE" + "jpbh4bzAhi34pxklqBmnuvfovDstbh45efABrshijpdenqqBjkDJijuvadlmhicl8clm34zA" + "efjpfg23fo454bgh3412234b78px01klJKclkl_013569abgiknqwyzBCEHIKLNOP", + "zEENadABvDOP23yzotbhNO0afgfoghGHotstijefDJtujp45zEmnENxyadfg12yzbh4bFGpx" + "qBlmzACF34otfokldehi23wxef01ijrsrCdexyyzghjpbhfgnq4botvwvDghpxuvsthi34wx" + "xyfg23rsyzDJtuij12adzAJKjpmnwx23zEfootfoghfgvwwxghbhpxhibh4bbhijjpjkhixy" + "8cbhpx45vw34jpAByzxyst23otqB4556yz45foFGefclpxdeklwxvwefjpvDfootjkGHstHI" + "ijDJ4bIJwxjpHIpxGHxyyzzEyzxyklcl8c89kl_01258bdfijnoprstuABCDEHJNP", + "ENklCFjpKL34ijGHadmnJK0a01DJghrsfg4b34OPIJHIlm23devDad8cbhIJrCghLMhifo0a" + "fg01zAMN78otfojkfgefotclvwJKKLpxJK45dejp8cadABNOwxtupxxyghfootyzIJLMzEqB" + "bhfoEN12zAuvMNijstDJ6756HIzEefnqotGHmn4bFGqBjpclfopxJKklfgxyENKLdebh4556" + "zAABzApxJKefyzotqBnqqBjpghHIijtuhibh67uvijjkLMlmvD4bNO34uvmnDJMNNOOP4bfo" + "MNdeotjppxfojpLMtuwx23uvvDDJIJuvDJ_013578abcdfhlnpqrtvxzADFGJKLMNP", + "bhzEABDJvDIJnqefadstqBENpxABdetumnwxzA670avw78ot56jk8cnq4bkl89OP67ijyzDJ" + "4578xyghyzlm23hizEuvjpijjkxykl12fofgghbh4bwxpx56tu4534otwxvDjp23vwpxNOfo" + "uvhimnijwxbhvwENvDpx4bDJvDNOclklghzEjpOP45ijwx6734IJyz56fggh45hibhxyuvhi" + "56ijbhpxtujppxyzotjkkluvfoxyyzzEyzvDDJENMNIJvDef4bHIuvtuxyIJotstpxforsot" + "bhENjpghfgghfotuuvvD67DJvDuvJKtubh788c78_12456789abdeiklnoqswxzAIJP", + "OPLMvwbh4bqBef67vDwxde8clmENNOnqkl780ayz45ijfgclvwlmxyKLhisttufojpmnJKuv" + "ij8cbhadef4bhibh3412wxcl01zEyzghot45xyvwDJzEENfgpxjpjk67wxstuv4bzE89hikl" + "ijENrCturs56rC67560a78cl45adfovDnqothivwghvD67jpst12zA56AB454bpx8cyzCFxy" + "mnzA56pxfgbhjpfoyztujkjpgh89pxbhxy67MNefzEDJde4bENzEfgpxJKfoyzotzEjpfost" + "KLef45ENfo5667NOOP56rsotENstrszE45otforCrs_" + "2345689abcdefgijlmpsuvxzBCDLMNP", + "zENOFGuvtuGHyzwxstpxhi34vwfgrsDJbhij45ghvDHI01DJIJOP4b89mnbhMN5623ghxyjp" + "344b45vweflmijwx4bjkfg56hiENyzbhzA4bxygh34ABvwNODJfopxuvvwbhENyzzA12rCxy" + "tuwx8c23vw12NO34jp4bstpxvD45bhzECFotyzHIvwhifoxyyzzEjppxDJcllmwxjkENyzmn" + "vwkllmefijuvGHjkNOnqJKDJENcltuvwjpmnpxstwxrsstrCdevwadvD0adetuDJuvjpkllm" + "kltuotxyyzstrszEjkklclfoyzstoteffojpvwstwxvwpxkl_" + "024569bdefhiknprstuwxzCDEFGJKOP", + "78FG4bghGHyzstijzExyjpFGhiclotklbhnq56yzrshi8cENvDlmzEpxwxCFxyyz01HINOEN" + "rCKLjkjpijLMpxIJDJjpMN45sthiijzEwxfotu4bzAmnpxfgvwfojk34otefbhst67rsgh4b" + "vDCF23st340ayzadbhxyEN56pxOP78yzotfgdekllmjpwx67efNO12gh23hifoDJijENjkfg" + "zEkl01vwcluvxyyz8cxyzAjpwx12vwpxbhABwx4bbhgh78vwjkhixyyzbh34ot4bbhhi8978" + "34xyij23jkuvtuhibh344bbhkluvjkvDhijppxjpjkDJIJDJHIwxvwuv_" + "1457bcehjmpqrswxyzACDEFHKN", + "stDJvw67foMNfgrs4btuENxyIJ0amnGHyzvDlmbh34zAHIFGwxxypxNOkluvijgh56DJxy45" + "yzadrC34GH56vwsttuuvdeotAB0a0167zExywxxyvw78EN8c0afgIJhiyz89HIjpwxtupxfo" + "efotwxjk67jprstuuvvDjkfoadotDJGHJKzAij8cstotxyjkkltucluvrsvwklforCdepx8c" + "wx89jkIJKLjppxot12vwCFxyJKvDijfgghDJyzbhvDfohi23ad34de0abhzE4b34yzvwot23" + 
"4bbhENIJHItuxypxGHjpxyMNjkkloteffocl8cefHIcljkjpdeaddeefot_" + "0134679dinorsvxyzACDFGIJMO", + "jpjkMNyz6756st12dersENklwxghrCzECFefvwLM01uvtufopxvDjp23MNfgENAB45uvNODJ" + "ijstclwxlmmn78qBtuzA67rspxvwKLjpFG4bJKwx56vDot0avw8cbhnqwxDJjkKLklLMpxfo" + "KL45MNijvDghOPuvJK56rC34qBfglmIJxytu4bghbhsthiCFotijjkjpvwwxNODJ89vw23fo" + "34uvefdetuotpxHIklmn4bwxIJvDadstfo12DJ01yz0almjprsjkstotvwbhjpstcl4bwxde" + "rsfo34rC2334pxefklwxJKfocljkjpvwdevD8cCFjkKLvwwxJKLMclDJIJHI4bGHFGGHHIIJ" + "_12678bcdghijklmnopqrstuwxyzACDEFGJMN", + "pxfo34qBDJefvwotkl4bJKyzOPzA89DJ23KL4578mn56lmdexyad0afoJKABjpwxstijrsot" + "IJ67clyzfghiuvbhrCzAyzvw4b128cxyvDwxJKpxyzjpjkEN01ijHIzEklvwclNOtu78hiyz" + "uvCFghxyDJfgpxMNGHIJjpDJ45ijyz56wxEN34zEhipxjpbhjkfoLM4b4567KLijENHIvDgh" + "otMNklvw56NOjkklFGhiwxstfgghbhENclpxDJ4bfoyzvDIJCFDJrCot2312fgstghrsrC8c" + "rsclxy8901fg45LMFGfopxGHJKMNbhfg0aHINOOPNO01MNstjpLMKLJKpx12wxotvwtuuvwx" + "vDpxuvtuDJ_2345679cefhijlnopqtuvzABDHIKLNP", + "nq4bzAHIfg45clvDKL67fo56mnDJhi3423JKvwuvklwxCFbh8c89otstclLMijjkvwlmgh12" + "45yzhi8cFGxyzEMNadwxtuNO4brsstKLLMrsuvbhyztuENOPvDij4bvwhi78DJkljkclmnkl" + "IJABxy344bwxzEyzHIGHrC0azApxABghxyNO678cJKuvvwcl8cwxfgjkjpyzghefijbhzEvD" + "ENKLpx4bfoJKhideHI89ijMNIJjp56zEJKuvxyotfoKLwx67px342378yzxybhpxrszEENyz" + "vwjpHIzALM34st454bGHij67bhhivDyzvwij56bhNO45FG4b34jkbhkl23efDJIJ12HIIJGH" + "DJdeadde_12346789bcdefghijpqruwxyABCDHIJLMNOP", + "ijpxENABrswx1234jp67zE78lmrCCFNOhixyfgzA67qBpxijMNAByztu8cjpstrs45uvwxot" + "fo01vD2356tuvwwxpxxyENLMjkbhvDotghefdeDJuv0ajpzEvDfg4bstpxvw3478yzhiFGef" + "89footwxbhghfgrCCFHInqijrskljkuvvw4bDJadrC12JK45clstfoqBzAKLot8cLMIJpxhi" + "klnqvDMNijlmjkDJJKvDhideuvgh01CFrsfgjp67clKLmnfoefbh4bfo0anqkl01hidepxtu" + "lmad12rCuvjkxyijjpFG0avDpxrsjpxyCFIJHIIJtuyzDJFGJKijhimnvDnqGHghqBadvwzE" + "FGbhEN01NO4bOPnqxyENmnzE_1245689bcdefghijkmortvwxzABCDFGHJKNO", + "foijwxvwrsrCpxvDwxxy34yzbhvwjpotghCFclKLhikl8cfg4b23cl45wxghdevwstjkklFG" + "ijrsfoLMNOadhiMNefDJbhxygh56deotfotujkstIJadfguvLM4bwxvDrC67DJvw78455667" + "3423KLlmENijuvklzEENjkvDHIotOPvwfowxghtubh0a4buv45ghvDJKIJxy01CFDJbhuvmn" + "JKrszAfg12ghbhrClm56KL4bklstotrsNOfo67bhJKrCefstot01fgIJdeghstadfohirsEN" + "st0abhvDvwpxotyz78xyrCstCFefwxzAfgrC01pxvwABvDrsstzAdeyzjpzEyzad890axyad" + "pxzEjkklcl8cclxyjpklijhibhhiij_23567abdgijkloprtvwxzCDFGHIJKOP", + "OPrClmbhJKkl78cljkNOijghmnzEotENfgfo8ctuMNzA23hiABklghzELM56uvfgclKLJKst" + "4bqBnqtu67jk56ijIJefstbhde45EN89klqBrs8cDJvDzAABvwlmHIhiuv34jk4bmnzEENjp" + "wxadMNstIJxyghfgtu23klyzvwDJijxyzAjpuvstpxENrC34clzEjpxyjkefhighwxzAqBde" + "ijABbhzAqByzhi67ENxyjkklwxnqghvwLMqBvDwx01MNlmpxclijtuotLMjpyzfozAefABst" + "DJrsstGHjkotpxFGIJkl4bzAjpyzxywxtu12px23fo45wxvwwxpxclfg8cefyzjpzE56cl34" + "2312otEN67564bNOklENjkzEyzijbh_0245789abcdefglmnostvwxyzACDEHIJKLMP", + "HIijjpvDgh898cuvtupxstzEDJ780aforsxyotMNJKjkkljp4bfofghistghnq34rCmnpxrs" + "vDKLDJcluvENbhwxfoijvwCF23JKzA4bfgAByzotefghstbhKLjp89defo67rCzA8c45pxfg" + "IJadfoghefhi0aotfgijrsfozEefxy4bbhghhizAklqBjpwxfgABtuijjpzApxjkDJlm45jp" + "dejkfoclvDhinqstvwFG78otvD8cforsrCeffgklstLMdeHIqBrsCFDJrCghrs34sthiyzij" + "zEyzclkljkot4bfoxyefGHclbhpxjpyzijENdeaddezEyzefhixyfobhij4bwx34otpxwxpx" + "ENvwvDNOOPDJvDjpijvwENpxIJxyyzzEyzxypx_" + "012345679defghijknpqrstuvwxyCEFGHKLM", + "ijJKwxxyjkyzadklrCzEwxCF4btuvwef01fglmDJ78ENvDstfokljpMNzAvwABtumn6756px" + "IJclhiefdeNOwxrs0apxFG8crCefxy23bhDJuvad45GHjpHIpxyzghzEvwwxjkjpijhistpx" + "wxvD4bDJuvtu12xyfo34otyzIJ01jppxxyjkklqBvwijENjkbhklOPnqzEyz4bcl3456gh0a" + "zAlm23jpvDpxJKDJijvDrsstjphiKL34NOtukl45LMijMNuvfg56LMjpJKmnghqBtu34jkfo" + 
"hipx67nqbhhieffg237812ij01stjkklvwdeqBefadNOABzAdehi23xylmKLJKKLOP4b5645" + "0aAB4bpxqBzEIJEN34ad4556jp6756HI4534ijIJ23mnbhhi12jpghpx_" + "0278abcdehijkmnorsuwyzACEFIKNOP", + "wx45nqotfo784b56fgjpvwuv0a45LM67ijjkkltu34zECFvwlmuvDJcltubhMNNO568cotIJ" + "fohiJKmn78rCijefrsjkhituvDnqxyKLwxLMyzpxDJvwJKqBENuvtu67vDzEstijjkklde4b" + "KLDJnqxyJKcllmKLotwxfojkbhyzmnhi4bENst34fgNO45zEvwadpxghrs8cij56hijpzAij" + "78nqjk89pxklclmnwx78ghlmuvfgclstottujkfo4buvot67MNfozE45qBvDefENhibh4bot" + "MNLMfo89454bzEzADJbhvwxyvDhideIJotuvABKLJKadKLHIqBzAtuDJIJDJuvvDfoijhivw" + "rCjkzEwxefENotzEghCFFGDJxyfgghfohiefNOdeOPJKKLfoNOJKDJot_" + "0345789abcdefgijklmoqtuwxyABEFIJKMNO", + "67ghfgMNEN8cjk78pxLMjp56pxfohirsDJklNOxyghfgwxJKrCijcljpMNKLJKLMyzxystot" + "0ajk45zEhirsENqBijpx67yz8cIJjpkljkHIclCFfobhxyNOhiFG01GHnqDJzEEN8c784b34" + "OP23px12uv8cNOghxymnklvDENtuclotstjp23fghijkvwlmef34pxrCyz8crsmnuvotMNCF" + "xybhpxfoDJotFGghrCjplmstwxrsijefpxdenqvDyzxyad89otzEef8cfgDJfovDklyzfgJK" + "jkIJghotclstotxyqB8cjpJKklpxKLbhjk4b89jppxjkwxlmLMmnvwnqhivDABKLfolmbh45" + "otkltu4bJKMNENDJ45zE56efIJzAABdezAefzEENvDstrs45jkqBMNbhstnqqBHIGH_" + "56789afghjklorvwxyBCDFGHIJLMNOP", + "MNEN78vDnqyzKLFGABvwjk89DJNOwxmnGHclvDIJvwstLMijuvCFotklFGzE12xyDJjpefJK" + "qB6723bhjkfopx8cadOPghHI78yzzAxyhiijcljpfgAB0aENMNpxjkyzxyghjpKLLMbhlmot" + "wxvwvDklzE45DJst8ctuNOrsrCadvDqBzAEN01CFABuvIJpx12vDwxnqzEclhistMNjkyzDJ" + "4b34JKijfoqBABmnghtuGHfgotjpghklENhipxnqqBtughfoefHIdefg23ABxycljkefFGqB" + "adghIJdeHIbhzEuvvDnqJKGHDJvDfgKLjpklLMMNclFGNO8cGHMNclLMklpxCFjkKLJKkl78" + "mnlm4b34klgh23fghi45ijjpuv56foij67hibhwxotvwhiij78vD67DJ4bvDfoIJHIvw_" + "015679bcdegiknqsuxyzACDFIJLMNOP", + "rCclMNjpvwCF01tu4buvENpx8c7889vDFG45lmotGHfoDJotjp56bh4bnqABwxmn34klhist" + "bhlmzAjkrsfgnqcl67vw8crCyzJK4bCFjpxyOPwxijotKLyzfo0agh45otxyvDtuotjkef78" + "delmmnbhfgfoFGNOadcl2356hiotDJstdeefotrs4bfonquvfgghijvDDJMNpxjpJKijvwot" + "LMeftuIJKLqBHIhibhotuv4bfoefwxvw4534ABfoJKzAotGHzEENtuzE23zAijklpxvDDJvD" + "4bNOOPjkkldeadlm0anq01stjpijtubhuvadtuuvvwhiijotwxpxjpfoeffoIJotstrsijwx" + "hibh4bvwvDDJvDvwwxJKrCrspxstotfofgxyfootstyzzEENMNzEyzghxyrsbh4bbhgh4556" + "_12345689cgjlmoqsuvwxBCDFGHJKLMP", + "GHfgmnnqfoDJstbhqBottughhiuvbhjp8c0a4bJKfgrsforCottuefCFuvzAbhijyzvDKLhi" + "fovwwxotstdeuvzArspxtustefENDJuvvwghfgrCwxpxjpadjkpxxyzEuvLMyzABefMNders" + "rCxyefpxCFkljpijzEhipxghclhifo8c89ot8ctuuvvwvDwxxyDJzANOENzEJKvwLMOPzAyz" + "ENNOENzEKLyzxypxjpijhistefIJjkbh4bde45kllmadDJ0a4bvDbhijgh56effgghbh4b34" + "2312010aadmnwxfopxhi4bfgghbhfgnqghFGotstqBfofggh4b45bh4b34bh56ghfootrsCF" + "rCrsstotfofggh234bbhhiij4bCF34jkkljpijlmhiFG4bcl8cbhhipxwx78vwvDDJijjk67" + "8c4bmnIJnqklwxjkHIpxjpjkpxkl56wxcl8c", + "MNuvmnJKIJwxnqABCFHIzA34yztu23xyzE4bvwfgDJLMwx78styzvDENfoNOFG34zE12MNJK" + "vw45xy56pxjpuv2312yz01ijjkKLklrslmqB8cLMhighENxybhpxmnfgjpJKgh4bfgtu45zE" + "DJjkIJ12kl67st78vDDJyzij89NO56zEhibhclotOP0axyghuvjk4brCklforsclpxlm34HI" + "GH23vwtujpwxotstpxjprsrC67jkNOkljpeffoHICFwxotdeefvwfgghbhdeMN4bFGtufo45" + "KL56ghIJfgghlmJKKL67vDvwmnLMpxwxvwrCvDpxDJuvjpijvDjkijhi78DJnqMNrsghKLij" + "uvtuklotstfoJKfgotfoNOfgIJmnstghKLrsHI67bhlmmncl8cGHOP788cclkljkijhibh4b" + "4556rC3423FG67124501341223340anq45561267", + "foqBnqvDjpDJxyhiIJfgMN1201ghotHIwx0atuefjkfovwvDdeyzotklzEadxyfgijDJyzwx" + "67vw34uvpx5645hifozAENNOwxzEjkJKtuotvDghpxbhcl0aij4bst34rs23mnxyOPstzAAB" + "efENzAzEforCotstvwDJvDIJDJLMrsefuv34IJvDstFGbh4b342312010aaddeeffg23MNtu" + "uvrCHI34GHghvwwxxyyzHIzA67xyENfoABqBfgotIJghnqABzEzAABqBnqDJNOmnstlmfozE" + "ENcl8c89kllmmnnqABzEkljkjppxxyyzzAABqBnqmnCFJKKLLMhirCzEclkljkMNJKcljpvw" + 
"bh8cijclrsklvDjkvwDJxywxpxvDjpxypxxyyzuvstotfofg786756454bbhghfgfoottuuv" + "vDDJJKKLLMMNENzEyzxypxjpjkklcl34st89NO23", + "yznqmndeijjpDJxyqBhiABwx01vw23bhjkuvadijderChiIJ0atuklNOvDuvpxzAtuyzHIot" + "clqBnq8cxyKLst89foJKqBENwxLMjk56tuGHefFG4brsjpvwzEyzkldefgghDJlmIJ45rCad" + "efxyvDMNdeENLMefzEcl348cjkuvklENpx23lmhiMNijKLLMjpwxadENMNtuDJhiotmnHI0a" + "adxyzAvwfoEN67ABCFqBstIJeflm12HIzAdezEefGHzAcllmFGmnadfgnqHIDJqBotAB01gh" + "fovDzA56yzbhxywxvwvDstrsxyDJIJHIMN0azEfgGHjk8c78ad4bFGyzstghijfgxybhpxxy" + "01hi4567zEENijfojkNOENotfo8czEJKKLfgCFLMrCKLyzgh34xy23JK4b34zEENNOOPNOEN" + "zE_124678abcdfghiklnopqstuvwxyzABCDEFGHIJKLO", + "GHzAbhmn4b010afojpxyABFGfgMNtuvDpxotyz34IJDJghfokl78fglmbhCFefghdeLMstvw" + "zA12HIvD4b8cxyNOjpjk4523pxwx89MNOPrs56rCvwnqklIJyzjpuvvDbhtuclotpxwx67DJ" + "zEuvKLstJKrsfozAhiijENNOjpbh4befzEGHfgpxjkHIhi8cENxy45bh34jpKLvwuvCFhiwx" + "4bghjkvwqBijkltuuvdevDstrsDJABotyzzA56xypxtufozEcl23jpLMIJyz34KLvwENvDij" + "jkhi78NOijjkDJ4512uvbhqBadtuJKottudevDDJ4b8chi012312IJDJvDKLklijuvtuotjk" + "foijnqlmhiHIgh67mnbhklfgghefnqfoottuuvvDdeDJIJDJvDbhuvtuotfofgfootstrsrC" + "rsstotfo_013456789bdefgjlmnstuyzABDEFGHIKLNOP", + "uvtufojpuvrCwxefyzzEvwENde8cot4b01fghiuvzExyDJclwxyzENOP12vwGHxypx2367st" + "fo4578ot01zAAByzsttubhadhidejkgh89fg4bqBkllm5645MNHIforsuvJKij674befzAvD" + "CFrCxyfgjp8cpxABhiijhiadjk34otbhghfgqBFGkl78tuxyfoGHDJefuv45ot0aclIJfo67" + "yzdelm56jphiaddeHImnvD45nqclKLef4bijzADJhiqBfoot34jkJKtuuvotfoCFEN23vDef" + "bhdeDJijxy120apxwxadhiJKdeghIJefKLfofgklJKotKLlmxyvwwxijLMstfoKLotvDyzpx" + "fojpzEstpxfgrsrCtuxyghwxstENDJbh4bMNIJbhvDNOklHIGHvwuvyzxypxjpijhiijbhjp" + "4b34px4bxybh_024789abcegijkmoqrstvwxyzABCFGJKLMNO", + "78xyGHhiijkl8cjkghhiMNclij67ENyzjppxxykljkjp78pxvwuvwxNOzEtuxyOPyzxyot67" + "fopxefzAclENfgghABhiijjppxyz8cxywxvwuvwxyztu89xyzEyzstotfoxypxzAjpclfggh" + "deefhiijlmbhhiijjppxmnwxvwwxxyyzkljkzAqB4b8cijABzAzEDJJKKLLMMNENzEyzxywx" + "vwqBbhfguvIJghhiJKbhijtuvDrsuvjprCad4bpxxyMN4534tuyz0anqotdezE23KLfosttu" + "LM12rsuvMNNOJKvwMNDJvDDJJKKLLMMNENzEyzxywxNOuv4btupxCFstKLrCfgefbhghotjp" + "foJKIJefjkadkldecloteffolmtuuvvDHIGHDJvDuvtustrsrCCFFGGHHIJKotstotfoef23" + "12010aaddeeffgghbh4b458c34566778clKL8c7867cl564534", + "DJ45vDJK67AB34rCpxFGDJ56CFzAjpKLxy78yz8cuv45IJLMbhqBwx67ABvwJK34zAjktulm" + "HIzEvDuvottufo7823otfoklefKL4bmnnqvDqB56ABuvMNpxLM67xy12hiMNGHij78deadfo" + "wxvwjpDJpxwxvwvDNOENvwjk34kl4523yz34clDJ4bxyjppxIJwxvwuvlmDJvDtuzEmnvwyz" + "jppxMNkljkuvtuotfofgghhiijjppxwxvDDJJKklKLst238cDJlm12clfo56tuvDpxbh45uv" + "vwhitu89lmijotrCfofgjppxfootxywxvwrsuvstotfotu67rCefmn8cdenqclklfgotghef" + "dead0a011223344bbhghrsfo45jkCFjpottuuvvDDJJKKLmnLMHIMNNOENOPNODJENvDzEuv" + "zAABpxjpjkkllmmnnqqBABzAyztusttuuvvDDJIJHIGHFGCFrCcl8c7867", + "ENGHvDvwijDJuvJKefKLhijp01tupx12wxstjkrsdevwghzArCLM56MN45vDfgxy34jpijwx" + "vwyzadxyhifoABuvzADJwxIJtu23stpxvDjpbhijCFjkklyzrszEvwlmjkxywxABzAqB4bDJ" + "efABrCuvmnzAtughyzvDzEHIstotDJvwbhfoxyfguvpxhinqxyJKrsKLstbhyzLMvDtursDJ" + "JKKLLMMNNOENzEyzxyotpxfojpjkklOPefjkijvDwxFGdeCFuv786756454bbhhiijjkklcl" + "ghadtulmcl67px8cDJqBjpNOij78bhlmOPnq6756mncl450a34nqEN4badzEbhqBklrChide" + "ijrsjppxeffostotyzstfoxyrsefdewxrCjkjpCFvwuvpxwxvwpxjpjkyzvD01zEadkl0acl" + "FG01uv8cDJ898cclklIJDJvDjkjpvwpxwxpxvwvDjpjkklclDJJKIJ8c89", + "xylm0avwJKad34uvtuhi238c78depxyz01wxqBjp670aclpxvw8cuvnqwxmnlmclfg89sttu" + "kl8cghadfg01jkjpstpx78vwvDrsDJwxKLIJxyyzJKotefijzEENyzvwwxvDhistjpbhDJpx" + "MNvwwxIJNOvwjppxxyyzzAABqBnqmnlmklrCij4bzEvD45rscl12ENyz0ajpklhiuvclrCtu" + "jk8cCFbhotjpGHclOPMNghstkl01forC4b0apxxyLMdeijefyzfgotzEfo45fgstrsghotKL" + 
"uvjk56jpvwbhJKzAyzpx34rCklwxHIijxywxghyzNOIJ4bHIvwfguvENzEyz45tustxyotjp" + "MNfo34fgghFGhibhhiCFFGij23jppxxyyzzEDJNOrsENrCvDvwwxpxjphibh345645566745" + "4bbhhiijjppxwxvwvDDJJKKLLMMNENzEzAABqBnqmnlmcl8c786789rs3423", + "wx45jkCFFGfo56678cfgvwxydezAghklhiwxvwuvyzvDDJtuuvtulmzAotjpfofgghbhABij" + "zEhijk4bxyJKadklbhstyz4bvw34pxxymnwxKLzAvwlmzEABijotqBuvtufo23uvjp0ayzhi" + "efABvDDJ4bEN12otjkghzEfoIJaddeefyzadxyrszAvwwxvwstdeuvpxENtursfgghbh4b34" + "2312010aaddeuvzENOvwxyyzxywxvwvDDJJKKLLMMNENzEghrCvwKLOPstCFjpNO4brsjkpx" + "ENsttujpkl89zEjkjppxwxvwvDuvDJtuIJJKyzotOPfoHIpxjpfgcl8cmnlmkljkjppxxyyz" + "zAABqBpxclIJghbhkljp4bvDtuijuvHItu45otGHhi4bbhfo4bvD45ghfgotDJtuvDvwvwuv" + "tuotfofgghhiijjppxvDjkHIpxDJjp56344556IJ34JK67HIKL78jkpxkl8ccl", + "xystDJfgghIJJKKLfoLMclvw8cklpxMNrCcl8cvDDJ12fg4blmwx34HIvwbhmnuv4bbhhiyz" + "ijghzE78tuhiENjprspx2334wxJK01stCFij12NOGHjkjp8c67bhpxxykl4bFGnqrCbhIJyz" + "hiijlm23HIjppxxy56jkwxvwENklclzEvDuvtu8cghotENmnGHfo89DJ78hiefnqdewxrsyz" + "zA6756vDDJ45bhFG4bbhfgghJKhi8cijfgjpKLhi34IJghCFqBjppxwxvwuvtuotfofgghhi" + "eflmnqhi4bclfostuvDJvD8c23xywxmnbhvw89lmhiuvrsjkjpjkpx4btunqotDJfoLMijqB" + "klmnIJjpwxefdenqjkpxxyadijuvvwAByzzAMNde0azEENuvwxOPefNOENzExyhifgghfgwx" + "vwOPeftudeadyzhiuvijxypxjkqBnqmnlmkljkjppxxyyzzAJKvDtuDJvDJKijvw", + "IJCFqBnqijfg67zADJefpxwxrszEhi78AB23zAJKtubhqB56vwjpijwxrCjkpx67footHIkl" + "jpdeKLuvLMclMN4btuvDghhiLMwxuvDJNO34JKzEENjkvDfgzEklABxyKLvwclLMijlmbh8c" + "OPGHgh4byzIJpxMNclDJhi89fg8cjkxyjpvDpx0aHIyzfoadghijjkfgxyGHbhNOwx78ENvw" + "4bkllm8cuvLMjpOPtumnclefyzKLlmotcl45JKhimnHIijnqfoIJ4b56zEbhghhi4b34238c" + "ENde67bh34qB45JK4bstABKLrsbhfgLMefghhiijotyzfo56jppxxy45jkjptuuvjkfgpxxy" + "34jpijad23wxvwuvrChibh1223344bbhhituotfostotfostCFrCrsij34st0123efjp1223" + "px34deeffgjpjkklclkljkjp_0245678cdefhijklmopqrsuvxyzABDEFGHIKLMNOP", + "uvvwtuhiuvotjpwxtuijvw8cpxwxfojppxuvKLefdeMNst7867bhtujkadLMuvklclstrsjp" + "rCvwwx56jkot8ckllmstxypxwxvDvwwxABrsDJot4b89IJvDDJjpfofgghhipxxyijIJjkJK" + "0a34HIjpbhyzrCstuvtuhi23ot12fouvpxvwENijfgghxypxjpstbhhicl8c786756454bbh" + "hiijjkefMNlmCFmndenqzAfgwxefNOGHLMrsclpxyzxyrCzE34zAlmyzENzEmnyzxyklENrs" + "otqBwxdejkABhivwNOtuuvtustotfofgvDFGzAghzEvwbhDJJKKLadwxIJ4b23EN3456cl8c" + "786756454bbhhiijjkOPCFpxJKjpNOnqjklmclKLkljkjplmpxwxLMvwvD23uvDJtuMNIJot" + "LMfoHIKLmnlmIJdeef788ccllmJKfoKLLMmnIJMNHIENot78GHdezEtuFGCFnqzAuv", + "wxfgKL34ghxytuotfofgOP23uvvwhitu0ajkotkluvlmLMFGwxfotuvwrCHIxypxdeuvwxvD" + "45adfgjpstyzMNNO4b12CFmnDJnqef67xy8c34otJKOPjkklGHvw23wxgh56IJ34lmzEfg67" + "ghrsfosttuyzfgxyuvmn12rs78pxKLjp0112stLMvwjkwxklvDvwrCclwx23px8cJKjpjkij" + "hiwxKL89klHIrsstbhotjkLMuvvwwxpxjpijhighfgfootclMNLMvDDJENJKNO4bbhvD0awx" + "uvefIJHItuotfodeeffg8cottuuvvwwxpxjpijhighfgijjpzElmyzclKLzAxypxABjpijhi" + "vDdebhklad0aijJKKLLMmnad4byzzEEN45qBMNzElm4bklxybhnqqBghENdefgzEjkijENzA" + "efAByzfgghbh4bqB34ABMN23zA_01235789abdfghiklmnopqstuvxyzCDEFGIJKLMNP", + "8cfoJKwxtu34stmnpxGHfgghMN89yzotfojpijCF78vDefjkKLENrswxpxxy6723LM12vwHI" + "fgklrCbh4bwxqBclhideABqBbhCFvw4b34NOzAjpDJpxjkFGvDvwCF23IJwxijefjppxzEgh" + "8c12lmJKABzAadHInq4byzxyklbhghwxvwuv01tuMN1245stmnlmtu4bclDJIJuvvDqBhi45" + "34HI23rsfgvwghwxhi7812ijjpbhIJ8cjkrCpxwxvwuvklJKCFtu7801otfofgijdeGHghnq" + "tuqBKLuvLMvDJKDJJK56vDuvhitubh4bstlmotbhABhibhzAclijstrsjkMNfoENzENOrCjp" + "px4b34OPxyNOfgstCFef45otfoyzrCENxyef4bKLzEbhst56ghrsdead0aaddezAef67fg78" + "ghAB6789JKIJpxbhHI4bIJJKjpKLjk_" + "0123456789befghjklnpqrstwyzACDEFGHIJKLMNO", + "jpfguvcl67DJadMNpxJKxyvDDJ8cyzij89jkzEkl0acljpdeENhiijbhCFpx8cadwxjkIJ4b" + 
"KLklvw56HIfonqLM01MNmnghNOlmij4534fg0abhFGLMCFxy4bwxhiijvwMNjkklpxjpijLM" + "ghtuhiijjkOPottuuvijvDefyzGHDJklpxfolm12vwnqbhhiIJefHI01de23xyijqBjpnqtu" + "zEadNOwxvwfgotpxstjprC12mntuuvlmvDDJclrs8c78vw0167JK56stfootKLLMIJ0aMN45" + "4bstENNO56JKrCzECFzAyzefxyklde8cjkHIjp01wxGHpxjpfoLMijfgghbhKLLMvwuvhiot" + "ijjkklzEENtuMNcluvrsvDotDJvDzEuv8c89tu788cotlmeffoefmnnqqBABqBotnqmntulm" + "cluv8cvD78DJ6756454bxyyzxy_" + "013456789abcdefghijklmnopqtvwxyzACDEFGHIJKLMOP", + "hirCvDCFjk56rs67ghGHtuijMNrC7845st4b34tuDJFGJKbhuvvwtuclyzuvzEfgCFENrsef" + "4bNOvDDJjkwxghhiqB5667IJnqHIbhkl8c78fojkpx45admnGH56lm4bmnijlmfg23rCotvw" + "ghLMdeqBxyvDadnqMNjp34footCFDJABefpxfojkstKLuvtubh67cllmuvIJLMmnlmkldejk" + "jpqBijpxotwx4bfovwfgvDghbh4bzArCxyhiijrsDJwx45clfojptuxy3401yzvwuv23zA12" + "JK23vD34yz8cxyKLDJ4bclJK12tupxot8ctuwxfofgvwpxvDuvABjpKLrCDJCFjkLMefvDuv" + "kltuqBIJ89clDJ7867ghFG45788cwxclbhklotABCFHIfojk56IJ4534pxot23jpjk1223px" + "klJKrCcl348c898cKLDJclkljkLM45MNNOLMOPNO_" + "023456789acefghijknruwxyzBCDFGHIJKNO", + "DJ78vDjkvwgh67klwx34jpijfgstMNLMxypxeflmmnuvhiENFGadotjpijtuuvyzghrsbh0a" + "rCfo23IJnqxyCF4bpxhiDJcljpijvDvwotwx01stbhfgGHyzde0axy34efzEpxghzArs12jp" + "JKKLjkNOfgfoMNklyzuvotJKclLMlmxyABtu8cuvDJ4bqBvDuvbhsttuuvvw23wxpxDJ45jp" + "78adzAyzrsABxyjkrCvwklvD34ENclrsDJHI8cIJotDJvDvwwxyzHIzEENyzCFgh45xypxfo" + "jphiNOOPijvwefdejkklcluveffootfgtuuvvwfoGHwxKLpx4bstjpvwjk5667otklvD5645" + "JK56DJvDstrs78uvstotturC8978strslmfosttumnuvvw4b67fgnqlm56bhqBwxpxjpwxjk" + "vwvDJKklDJlmIJHIIJDJmnABhivDjkvwijjkwxpx_" + "0235678abcdghijklmnopqrsuvwxyzABCDFGILMNO", + "efvDijpxjkDJklfgcldestrslmxyrCIJfoyz4556HIghottuuvhiadfgCF8crsfo89ijbhot" + "tujkhi67ijghjk0afgclkl8cjkefdejpstzApxxyhirsABijjpyzjkqBzExyyzzAABqBnqmn" + "lmkljkjpbhwxvwjpuvghvDvwclDJ4b34bh4bghhiJKklfgghKLhiijjpbhhipxtuqBxyjkjp" + "pxijyzxybhhi4bbhzE45344b5623wxzA12vw34vDzEuvrCvw4bwxbhhi23453445ijstjppx" + "yzxywxvwvDDJJKKLLMMNENzEKLIJrs56ijHI23JKIJ8cGHHIstFGhirCtuadIJJKKL67mnCF" + "rCLMMNNOde12ENzEOPzA7801bhKLvDABfo4byzzEvwotwx1223344bbhhiijjppxwxvwuvtu" + "otfoefdead0aMNJKvwjkwxjpkljk8cclLMMNENzEzAABqBnqmnlmkljkjppxwxvwvDDJJKzE" + "NO8cENzEyzNO", + "kl67ghfgbh4botefMNzEpxjpzAyzxyAB0a78foqBIJjkstJKzAclwxvwzEuv56tuefkl12st" + "LM67pxhinqdebhrsNOABENjpjkrCMNzE34EN45wxpxlmefyzad0a01zE4b56HIKLLMqBMN34" + "ijotIJvwvD67wx23xyvw8cvDJKNOklyzDJvDENuvtughjkstKLtubh344bbhhi0apxwxvwuv" + "tuotfofgghhiijHIkl78ot8cMNhixyrslmstGH34ghclotfoef67zArs56otdeyzABadfgzA" + "strCIJCFFGrCEN8crsmnstotyzxyfofgghLMhiMNwxOPNOij89lmOPvwvDefMNfootqBjptu" + "pxDJLMvDuv8ctuKLnqotfoclwxIJHIefIJJKqBde8ceffoDJvwotkltuuvtu89vDjkijadot" + "foDJhighfgghefhiJKKLijjkklxy0ayzzEyzxy_" + "012345679abcdefghijklmnopqrstuvwyABCDEHIKLMNOP", + "pxjk45wxtubhstLMkl4b6701clrsjpefABdeadMN34fgpxJKhiuvmnvw0awxxybhottufouv" + "yzijvwDJ4bjkKLhiCFot8cJKkl34wxjpxyKLijlmyzhi89LM23pxvDadqBzEmnstjpKLxyyz" + "bhDJzEJKENNO12OPijnqxyhiijIJghfgtuzEvDefuvdeHItuzAstjkhiGHklvwwxyzvD56AB" + "ENxyyzlmNOpxjpclxyijhizAIJmn34yzJK4534lmMN56KL8c67xyzELMgh23MNfgyzKLpxwx" + "4bJKDJjp34vDuvtu4556IJeffootstrCrsst45mnNOjk4bqBnqbhgh67fgqBfoottuuvABbh" + "FGvwwxijxy4b45wx7867vDef5645DJIJdemnyzHIzAIJadDJ4bvDvwdeCF2389zEwxbh12uv" + "pxtueflmhi23cl8ccllmfoghfgottughuvbhij4b344bbh_" + "012345789bcdefghklnprstvwxyzABDEFHIKLMNOP", + "34vDyzxyjk4b45jpzAotfgyz238cbhgh12ABklclDJpxzEfo4bhistENwx01ijjkjpIJvw67" + "HIpxFG34NOefxyvDwxvw56uvDJyz45zEtuvD785689klwxvwuvtuotfofgghhiijjpIJijst" + "deefrs6756JKot78fgghbhsthiijlmjp4b8cpx89ENwxjkjpKLpxrsDJLMGHkltuvwHIjk23" + 
"bhjp45vDpxcl0awxOP34rCvw4bxy12yzwxlmxyJKzE23DJMNvw12mn34uvzAJKENNOtursst" + "OPklABtuIJvDJKuvtuotzElmzAqBAB01fofgzAotyzwxCFghvwbhrsuvsthiij0arCrsstot" + "nqtustjprC23CFrCrssttuuvvwwxxyyzzEENMNLMKLJKIJHIGHpx12xymnclzEqBzAAB8cyz" + "xypxjpijhibh4b455667788ccllmmnnqqBABcl8c564523bhhiijhibh4b455667788cclkl" + "jkfozEfgbhghfgbhfo", + "bh78ad674bpxmnuvvw0ajp34ENijwxCFxy12MN01yzefsthizEvDbh78JK4bdevw45rChiwx" + "fopx56rsjpijotEN23xyNODJstMNhiadtughuvKLvwbhwx0ahiOPxyyzpxJKvDvwwxpxzAjp" + "ijxyDJ4b45IJpxhixyjppxyzxyrCbhCFhivDDJIJHIGHFGCFrCrssttuKLtujpijhighfgfo" + "ottuuvvwwxvDIJghABjphipxtu56ghfgfoottuuvvwwxpxjpij67jkklDJtuclyzef8cghjp" + "depxlmbh89xyzEyzxypxjpjkklcl8clmclNOij8c786756454bbhhiijjkkl3445zEghLMfg" + "KLadefmndefoijotghtuuvvDDJIJHInqJKhighad56fgIJij67JKKLefvwtufoLMjk788cot" + "qBABmnkllmtuzAklclmnlmmnENzEENyzwxpxNOjkijhibhlmcl8c786756454bbhhiijjppx" + "xyyzzAABqBnqwx34jk23vw", + "pxtu45kljk56lmstIJzEij67vwwxhigh56ENvDklfglmjpefuvvwtustbhde8cmnad4bijuv" + "px0a45xyjpHIhidewxbhvwotclghyz89rsjknqst4btuij78rsuvvwzEqBxystyzwxrCvwkl" + "rsvDDJstvDotjk5645cl67pxtujpfoot56tuhi8cxyuvfgbhlmvDDJJK45DJtuvDgh4bpxrC" + "klfgjkfoCFbhot34ef89foottuij23uv4bghtuoteffgfolmdeadclotrCmnstnqrsstqBtu" + "eflmdeefuvklvwyzxywxvwvDDJJKKLLMMNENmnrCjklmIJJKzAklfoottuKLuvDJxyvDcluv" + "DJtuyzotIJfoxyefLM0a4b342312010aaddeeffgghzAhiMNENNOij4bHIOP23ENzEhibhyz" + "hixyjp4bpxxy3423jpijyzzEhiEN4bbhNOhi4bijjp_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP", + "GHclklhirCHIENghIJHInq01JKvwKLijqBhijk8c78LMad12vDwxfoottuMNuvNOpxOPvDbh" + "xyfgyzefijzEjpdeclEN8c0aGH67CFjkklghNOfgfovwHI564534ot01tuuv4blmvDjkDJ12" + "JKbh34ghmnvDklvwfgwxpxFG23footjpjkklclvwlmvDst34efad4556hitudeLMrsvwKLmn" + "adst67ijuvrCOPwx4bDJpx0aadwxklvD89vwIJHIjk78fgghvDijfgwxfopxxy67GHDJyzzE" + "efdejpvDhibhklENjkad0acl8956HIuv8ccltukl45ot4bjkfoijjkuvstbhrscl8cnq78vw" + "rC4b8cclkljkghjpfgpxxyyzwxmnlm45zAotstzExyABmnotfoqBrsvwcluvzAyz8cABnqpx" + "ghzA786778qBjpijnqjppxqBmnxy8cABwxclvwuvlmtusttumnzAuvvwwx_" + "12789abcdeghijlmnoqstuvwxyzABCDEFGIJKLNOP", + "jpJKfgwxghzAklmnjkpxqByzKLCFvDvwwxcluvvwABjpzAfotupxklij0ahiwxLMFGDJadbh" + "GHjpHI4bnqqBstrslmvwvDxyzEij45wxuvJKhiABbhENturCot4bfoCFDJmnyzef0ast34NO" + "23zAvDotvwwxpxrsFGrCjpCF67jkzEfgklnqijhicl8cIJdewxefghjpfovwotlmvDKLJKDJ" + "vDFGtuuvvwwxENxypxjpyzGHzEijjpxytuKLpxdeadsthiMNbhFGrsjphifgijhijpyzqBEN" + "NOxybhzAzEzA4bnqqBABzAyzxypxjpjkkllmjk34rCzE23OPotENghclbhijhiijLM4bbhhi" + "12jkijkljkjpxyyzMN0123kllmNOmnENclzE34yz45xypxjpjk8cwx89kl78cl678c5645cl" + "kljkjpvwvD4bDJIJfgfootstrsrCCFFGGHHIIJDJvDvwwxpxjpijhibhghotfgfoOPef3423" + "12010aaddeeffgghbhotrCstrsrC", + "cl78uv45wxfofg67AB12efLMCFyzpxghvwhivDotwxklzArsij788cclOPDJmnfo4bdelmGH" + "IJ23tujkstklbhFG34rCjpqBuvJKotCFghABvDnqIJpxHIefGHmnlmDJ01xyadKLtuyz0afg" + "uvzEcljkfohiqBNOotdersstnq8crsijadMNENJKLMgh78efKL0azAhiIJ67fgghfoMNrC4b" + "JKotHImnGHijtuLMfodehieffoottuDJFGjpbhuvadvD0aABuvjkpxijqBKLjpijIJhitu01" + "klxyCFclJKyz0azEforsghdeadfgef8cfo0aijot01zAnqLMtuzEuvvwmn89IJMN8cwxABEN" + "NOzExywxvwstfoefcluvdeeffolmclotfotuMNzA12fg4buvghvwwxpxstjpLMxymn01bhpx" + "nqij4bbhrsgh0aadfg348c7867rC78zE8cDJENclNOOPJKKLJKNOfoENzEklDJ_" + "1245678adefijklmnopqrstuvwxyzBCDEFGHIJKLP", + "uvfgad0aghbhtunq8cfoNOyzotst23fgfoghDJhi89zE4buvrsijfgIJefghjkxyyz12pxkl" + "34bhwxfgvwtuMNuvclhiKLpxHItu4bENvwijzAjppxABjkLMxy23wx458cghklclotijyzhi" + "ij8c78tuvwuvvDzE34tust4bzADJABMNotxy23rCfoefjk01yz56fgfovwderswxad8cotst" + "jpkltupxrsvwjpvDuvtu12DJqBotlmfoefforC23ottucl01uvjkIJOP0a8cENjpxyNOJKwx" + 
"vDDJvDuvvwzAKLtuHIJKvDotfoABIJLMdeGHFGeffo01HIIJDJvDvwIJMNwxotOPtuGHHIuv" + "vDpxIJFGJKadjpjkKLklDJJKLMKLJKDJvDLMuvtuotclfoef0aaddeeffgghbh4b34231278" + "6745564bhiijjppxyzxypxjpijhibh4b455667788ccllmmnnqqBABzECFlmzAklABjkqBhi" + "ijjppxxyhinqkllmmnnqqBABzAyzxypxjp", + "KLJKlmijOPfgrsxyvDqBpxLMuvDJKLwxmnjphixypxyzNOIJvDxyrCstvwwxCFHIzErs45DJ" + "pxjpyzJK674befENzE45ENotijbhxyjkklhitude56vDLMDJpxIJ0aHIjpuvGHMNclij4bbh" + "78ENrCad67ghFGpxLM4534HIjp23vDfoxyyzzE4b01bh12DJzAtupxxyotpxjp56hiuvefij" + "jp45depxtughuv34ABvDvwuvJKENyzfoot4bst562312foefotfgdeefaddeefIJfoot01gh" + "tuDJfguvvwwxrsvDpxvwjpxyjkhistwxvwzAvDtuklrsDJcluvNOvw8cIJjp89OPlmpx0ajp" + "01ghwxjkij78pxNOvwHIGHuvFGjpENpx0a67klxyjkhikltuyzcl898cghclzEfgzAstrsrC" + "rsxylm5645wxvwuvstmn4btu45uvnq5667vwghmnwxxyqBlmkljkijhiijjkkllmmnnq_" + "0356789abcdefhijklmnopqrsuvwxyzABCDEFGHIJKLMNP", + "vwaduv4byzJKMN34KLqBtupxjpzE23ENxyDJfo78IJyzNOijOPfgJKLMhi45bhjk4bwxot12" + "vw34zEklij23jkpxlmKLclMNjpuvtuzAst8cxyijhiijnqrswxLMyzrCxyvwmnwxuvyzABxy" + "MNENzEzAMNvDDJIJJKtuHIIJvwqBuvtukldewxvwvDDJJKKLLMMNENzEyzpxotuvefzAfoot" + "stjpjkHIrsijhighCFclbhotNOkljkxyhiyzENMNzEyzxypxefjpjkijfgfoklotfgLMwxpx" + "JKstjpjk4b455667788cclkljkijhighotrCfo898cbhABgh34GHwxfg45footrsstzAtuHI" + "gh4bcl56rsuvvw67vDpxlmyz45xywxvwyzpxmnbhkl4bnqwxghrCmnCFjpjkklijbhghfgot" + "foefde0a56addeeffglmghhimnijjpbh4b0a78pxxyMNENzEyzxywxvwvDDJJKKL3423clvD" + "uvotIJstDJIJHIGHFGCFrCrssttuuv128c0189clzAAB", + "MN4bCFuvjktuxyotyzwxbhklvwpxxyGHyzuvzEfotuij010anqefyzmnvDuvDJjkwxxyvw34" + "otwxyzsttuENFGzErsdefo12yzadxy45jpef01pxwxuvvwwxNOstrCfotuuvotvwijjptu0a" + "xyghOPyzde4bzAforsLMMNENyzwxAB23xyyzzAABqBnqmnlmkljkjpjpjkefmnyzclCF8c12" + "adfgklfojk01jpbhpx34xyclwxvwuvtust0ayzotvD89deABDJzE4bJKDJyzfouvrCrsstot" + "KLfg23foghxypxLMbhfgfootjphituijuv8c8c786756454bbhhiijjppxxyyzzAABqBnqmn" + "lm34klvD8czEjkjpvwENrCDJIJHIDJpxxyyzzEwxvwvDDJJKKLLMMNENzEyzpxjpijbhbhhi" + "ijjkklcl8c78675645IJHI8c56NOGHOPHIkljkjppxDJvwxyyzzEMNLMwxvwvDDJJKKLLMMN" + "ENzEyzFGpxjpIJijhiJKbhklcl8c786756454bbhhiijIJHI", + "45wxpxjp01jkefkl0aDJxylmIJLMyzdepxNOijhijpadijGHENvw56hiuv8c89stbhNOFGwx" + "MNotst8cHIpx78vDOP4bzAjpvwGHclhivDijxyhi45JKIJbhghpxCF4byzwxjkfgKL12jppx" + "wxfobhghvwijhiijwxLMrs0167jpxy5645pxjkxyyzfg8cklzEot78EN4bstcl8cbh67NOJK" + "HIKLyzrC344bbhxylmmn4bjpot34yzrsclDJJKhivDijhijp56pxjkzE23EN45NOstjpOPjk" + "rC34fo23otlmzAfoefghfozE12nqtu0aABLM2334MNdeklyzadpx45uvde0ajkhi56ijwxDJ" + "jk01vD12mnqBvwklIJefDJwx01xylmvDABhiyz67wxJKzEvwuvENklqBwxtujkotfostotMN" + "ijLMstzApxrsrCCFrCrsstnqotzEKLfofgefghhiJKijdeadzA0aadDJjkklABdeclqBnq8c" + "qBABfgef_012345679abcdefghijklmnopqstvwxyzADEFGHIJKLMOP", + "67ghjkjpOPhifofgDJFGvwijpxghkljplmhiijwxmnvwvDxyuvtuwxvwwxstrsnqqBABzAyz" + "xypxjpjkkllmGHmnotfofgghhiijstjkjpklclIJbhHIotfoxypxjpjkkllmmnnqqBABzAmn" + "12zEefrsfgEN4b45NOghDJhiOP3401IJijrCrsotJKbhhiijCFFGvD8cdeGHef89yzjpstxy" + "pxrsvw4bzE23jprCKLbhLMtu4bDJxywxpxjkIJhiadklHIfgclijrsjp1234450ahi56gh01" + "jkdepxklyzxypxjp8cad78clMN23hiijhikl8cENjkkl78jppxwxbh45vwclvDuvkl674btu" + "otstfootbhtu34fgjkvwjpwxghyz45zEENFGGHHIIJJKKLLMMNENzEyzxywxvwuvtustrsrC" + "pxrCwx78DJvDfgrsJK56yzDJfovDvwvD8cstKLJKDJotvDKLuvsttuforsoteffoefdeotzA" + "tuuvrCCFvwwxlmmnnqqBABzAyzxypxjpjkclad8c780amn67lmmnnq56", + "tu12ij23otwxlmxyjpvwvDLMijJKhiMNgh78zApx01ABcljkfoijhijpyzzEmnbhwxijfg4b" + "qBIJHIadefENnqpxjppxjkKLkl458cxywxvw34uv56zAvwNOhi0aDJ23tustwxbh4brsjkOP" + "yzcl89rCpxjpjk12px45de23ijABkljkwxfgvwad8cjpvDpxDJIJDJENzEcluvotyzklvDxy" + "stDJvwwxpxzAjpijrshibhENqBjkvwzEijjktuhighfgijfoNOENIJfgnqotlmjpJKzEDJpx" + 
"yzrCCFxyuvvwpxjpwxijzEMNENzEyzxywxvwvDDJJKKLsthiklMNFGtuKLJKDJzEijjkrCij" + "rshighuvfgbhfovDotstkl4botbheffoottuuvlmvDotDJIJde45efdefo34ottughotfggh" + "bh4b342312010aaddefoefde0a1223bhhiij3456jp45675645pxzEyzxypxjpijhibh4b34" + "2312010aaddeeffootstrsrCCFFGGHHIIJJKKLLMMNNOOP677889zAAB", + "uvhibhwxvwjp4bxyghfgijpxefyzhidebhjpxyghpxfgadzAfo4botwxjphizEeftuuvfoij" + "otjkENklLMstlmNOhirsyzvDxywxijbhvwzErCAB344bbhghhiyzfgghijfgfoEN6756jpwx" + "mnCFotpxefqBstfojpjkdenqxyefyzrsfgdeghijhighfg23klefijadrCde450aNOjkad4b" + "bhdeefclfoghottulmfguvfofgot01zAghvDOPbhst4b0a45563445klABfopxDJjpefIJij" + "vDhipxbhzEaddewxefvwvD786756454bbhhiijjkklcl34ad23MNfo0aclDJENjpzEvDwxpx" + "wxzAvwotvDstABkljpjkDJkljptursclIJHIqBwxnqpxJKmnGHwxjpzEIJMN12HIENJKKLlm" + "IJ8cclkl8cjk89jppxvwstLMtursMNuvJKvwENwxpxjpjk8cIJklHIclklzEGH8cjkjpvwFG" + "pxvDDJKLLMMNENzEzAKLJKDJvDvwwxpxjpjkkllmmnnqqBABzAzEENMN", + "stxyyzzEvwuvrswxvwklhirCijLMIJfgtuclef8cDJjp67ghCFjkjpJKstEN89uvbhpxfgvD" + "rsuvkl23ghfojkhixyKL4bklclijFGDJ8cJKKL12fgyz45MNghlmzAwxfg34HIjkxybhklyz" + "56NOzErC45jkmnde2334efjphiGHENpx2301ghfolmjkklNOfgottufojkfguvvD5678uvot" + "67ijhi56DJjpsttuCFnquvvwrs45fo4bclwxxyklijIJefuvyz56zEpxdeeffojkjpotghtu" + "uvklvwwx12bhxyfoyzrCcl2312hipx8cefad0aOPvDvw78adst67zADJ78deefijfoclklfg" + "wxvD56fojk01fgrsklvwtu45wxIJCF4bclHIGHHIghIJDJ45vDuvABtujpxyadvw56histij" + "bh8c78wx0a67pxzAothi5645fovwyzqB4badxyzAwxAB34uvstfgbh4bbhhighbhfgtufouv" + "vwijjkwxkllmpxmnotlmkljk_01246789acdefijklmnoprstuvyzABCEFGHIKLMNO", + "deghhiIJCFfgefFGijHIKL78deghpx89zEfgwxvwLMklMNbhghhilmNOfootadOPuvjpENzA" + "st4bABpxtu67NOxyyzJK45effgghwxbhjkijjpKLstkl8cjkvwrsmnuvclpxGHIJxy4b45vD" + "yz34hifo56klrC8cdeqB78CFstclzA458cotfoijstwxjkABHIfg56lmzEghrsbhEN4befhi" + "nqjpJKfostpx67yz34otDJvwfoxyyzzEzAklfgrCghENjk89jpLMwxklKLstvDABuvtupxLM" + "ot2378adMN34efjkdeclyzkljkjpijpxhibh4bCF453423lm12NO010aFGadqBdeef236756" + "mnrsGHjpfoot8cfgtunqmnuvlm34nqfovwqBstwxOPuvotsttufopxrCfgCFrCxyghjpFGrs" + "bhrCCF4brC45stwxtuuvvDvwbhDJclvDlmDJvD34rsvwhiijwxIJ23clhixyyz1223344556" + "67788c78bh6756453423_012345689abdfghijklmnopqrstuvwxyzABCDFGHIKMNOP", + "34jkmn23ijtufootfokl56xyeffo4bjppxxydenqzAENzENO67clhiijqBjkadABGHHIzAEN" + "zEbh0ayzxy3423jp8cklpxlm4bgh01wxvwvDhiijjpmnpxxyjkDJLMENjpjkzEOPNOnqJKkl" + "ENzEqByzxyclpx8c78wxOPjkvwuvIJtustxyjpotfofgzEMNijENzEghyzbhfoxywxxyyzAB" + "xywxvwuvtuotfoefdead0a011223344bbhhiijjkkllmmnnqqBABzAstjkfo238cotpxwxrs" + "xyrC01yzvDtuvwzAAByzxypxjpjkkllmmnnqqBABwxpxCFFGwxvwDJvDvwwxpxGHjkij45bh" + "DJ4bbhijjppxwxvwuvtuotfofggh45cleffgJKstfojkotKLfodeadstef56klfo458cghot" + "rs78strCotclfohiij0a01122334455667788cclkljkijhighfgefdersCFFGJKHIrCef89" + "ghrshistijfootjkfotukluvvD8cclklefjk8crsrCCFFGGHHIIJDJvDuvtuijJKhiKLgh", + "wxghadpxjpvwwxtuvwstijnqvDxyvwwxpxvwjpDJIJhiwxuvJKbhFGtuijvwwxxyjk23yzIJ" + "vDKLfgklstghfouv6756zArsABijpxJKxy12LMottuhiclHIIJot8cyzvwijzEwxzAdepx4b" + "zE89lmJK45xyfgvwefjppxuvjk4bstxyijbhghklKLENfgmn34fo4bvDclhiij8cdeJK89GH" + "ABHI7834jk01klefIJotstdeNOJKGHlmqBadDJHI2334jpijbhyzOPhiijjkjprszEpxwxGH" + "rCCFrsstdeeffootstrsrCCFFGGHHIIJDJvDvwwxpxjpjkklcl8c78675645342312010aEN" + "ghfgNOKL23vDklgh01jknqABjpLMzArsbhOPpxyz344bbhwxDJhizEfomnxyyzzEENMNLMKL" + "JKDJvDvwfgwxzEuvABvwuvstpxjpijhighfgfoottuuvvwzAjpbh4bxywxvwuvsttufoef78" + "dead4556deeffoottuuvvwwxpxjpjkklcl8c78675645342312010almxyclrsmnnqqBABzA" + "zE", + "rsDJlmghwxzEqBIJjkbhENcl2312yz4bhiturCfgHIvDCFotDJzA01px78vwuvtuvDnqjpst" + "pxfootwxkl67rsNO8cclMNijghDJfgJKjpefxywxpxyzDJxyvwyz45AB8c56uv78vDdejpqB" + 
"KL8cIJpxjphirCLMijpxwxvw4534bhuvjkDJtuhizEvDij4bstrspxvwrCyzCFJKjpbhpxxy" + "pxkljkjpKLpxOPghyzwxcl8c34vwvDothifo12DJ23fgzEuvlmstvDvw34DJklzAotfoyzwx" + "FGENpx4bJKghefhideijCFjkGHadst0acljp8cpxxyyzzAbhAB23wx89rCdeotmnnqvwrszE" + "yzDJhiuvxytupx12rCqBst8cotvDuvfoCFclklefjknqjpvDpxijottuxyuvjklmyzmnDJJK" + "vDzAlmdezEklENMNNOMNLMFGMNNOKLOPJKGHDJvDuvtuABNOotfoMNadfgotjkstrsrCCFFG" + "CF0arC01rsst0aIJotfofgadghbh_" + "01234567abcdefghijmnopqrstuwxyABCDEFGHIJKLMNOP", + "hiefuvMNENzE8cbhkljklmmnDJ78IJdexyclfoHI34klwxvw8cfgENtuABuv4bot4589fotu" + "ghijjphiclNOyzpxstefvDuvDJtuotvDnqwxfodeJK56rsxy67DJjkklstpx78KLotLMfo01" + "ad0afgJKyzghrCjpKLpxbhwxvwENijzEvDdeadCFwxpxrsyzjkjpOPFGENrCpxDJhiwxCFjk" + "vwJKMNbhfouvtucl01xypxijjpIJpx8c4bwxvwvDuvNObhpxclOPtuHIijotjpstklhitulm" + "DJjkrsjpforCuvpxefijfodemnklwxadvwvDxyCFyzENwxzAxyyzxywx45jppxvwxyzEABlm" + "debhqBuv0apxjpjkkllmmnnqqBABzAyz34zE45ENtuijwx4bNOst23otfoMNfgjkfopx45hi" + "56tuuv4534vDuvtuotFGst56foDJrsfgghhifgstJKKLfootLMfofg4bKLstijjphibhJKpx" + "hi12ij01wx4b0a3401122334jkDJ4b_" + "1356789abcdeghjklmnopqrstuvwxyABCDEFGHIJKLMNOP", + "vDfghi4bjpvwyz89jkwxDJklpx45bhjpuvJK67vw8cKL34lmef23tude4bijwxjk01xyhiwx" + "zEbhotENmnijuvklIJ34JK23vDnqHItuuvqB56ABzA124bIJNOvwstfohiwxOP78pxjpadij" + "otjpefdetuhighfoyzxyuvefrsdepxbh23hijpijhiotzAbhjpGHHIrCvDad4bDJtujkkllm" + "mnnqqBABzAyzxypx3445cl23JKvDKLzELMMNKLvwIJyz0axyNOklENzEyz8c4bxyadde56bh" + "jppxwxst6778vweffovDjpDJNOjkENuvvDpxghvwDJjpjkbhpxwxIJvwkllm4bvDJKDJclvD" + "56uvIJtuGHotOPfoFGfgstGHrsstmnfotu8cHIuvlm45IJvwJKpxjpjkcl56mn67KLLM788c" + "nq56clkl45jkjppxwxKLJKqB34vwDJCFAB23vDMN893401ENrCuvtuCFotfoDJefstdeadzE" + "JK0a45KLzA561267zEENMNLMMNENzEAB_" + "012345679abcdefghjklmnqrstuvwxzABCDEGHIJKLMNOP", + "jpxyjkFGwxkl56rC78GHDJpxzAHI6701vDyzvwxyjpmnpxcl12ABwxvw8cuvzEvwyzENij78" + "hijkNOJKxyOPijhighklzEDJKLjpMNpxyzhixypxijlmjpyzpxxyENmnijhinqzEyzJKIJxy" + "NOpxjpENzEbhLMclKLijuvtuotfofgghhiijjppxwxLMbheffgyzstotfoJKrsrCvDkl4buv" + "px34mnCFvwghhi8cjkMN23jpjkHIpxrCwxxyij78kltupxjkcllmklvwotstjkijhighbhfg" + "ghhiefvDjpijjkhirsbhde4b34IJadfoefdeqBfgrC45klmnbhadghhiijhibh4b45566778" + "8cclklefjpCFfoKLpxxy45bhstotfgfofg0aadghbh4bbhLMFG89tudeghGH01yzCFMNrCuv" + "strsrCCFFGGHHIIJDJvDuvefzAottuENuvvDzEzAnqABmnlmcl8c78ENMNLMKLJKDJvDuvtu" + "otfofgghbh4b455667788ccllmmnnqqBABzAstotfo45stfgghbh34231223344bbhghfgef" + "dead0a0145", + "89wxuvLMbhvwtuMNpx12uvjpjkwxxyvwtuwx4befyzklclxypxzEjpnqyzzAotqBnq8chi45" + "vDNODJfgxyABIJtuuvpxHIbhdeijtughzA4bvwGH56wx78fgbhjp45EN4b67hifoijxyghst" + "zEuvyzxyNOpxjpOPFGbhghefotsttu4bstfgrsENrCgh34rs23hiad4bijsttujkkluvvDlm" + "vwvDDJfoJKvDDJwxfgpxIJKLvwxymnjpLMMNENzEyzxywxvwvDDJJKHIbhghNOde4bbh344b" + "ENbhhifgijbh34pxvw4bzE454bbhefNOhifgzAwxOPijbhjkklcl8cKL4blm3423344bvwcl" + "jpbhpxwxjkghklvDmnDJfgvwnqijclhivDDJefABuvijqBtuotIJ78fodeDJefjpdeefGHfo" + "ottuuvvwwx67vDABvwwxadvwvDDJIJ0a01JKDJvDuvpxtuotfoHIefdeaddeeffoottuuvvD" + "DJJKKLjkjpjkpxLMklclwxvw8c89vDIJDJIJ8cvDvwwxpxHIclGHkljpjkjp_" + "1345679abcdefhjklmnopqrstuvwxyzABDEFGHIJKLMNOP", + "4bDJ45nqijghhiGHfgbhijjpjk4bklpxcljphixy34wxyzzEvwij12vDpxjpwxvwxyjk0ahi" + "ENklot56qB01ij23bhIJMNJKlm89pxadyzDJxyABfovD0azAyzhizEFGijwxIJ01jpENHI4b" + "pxDJGHgh45mnxywxyzzEvwuv8cclijtuhifgghijENNOMNLMxyKLjpMNpxxyJKOPDJvDjkIJ" + "uvENyzwxhiDJijklhitustjktughijhiijjppxwxvwuvtuotfofgxy89bh8crspxijsthiij" + "bhfojppx4bbhrsghJKxyyztuuvvwzEwxxyqBABzAyzxywxvwuvtuotfofgghbh4b45566778" + "8ccllmmn5634EN4bbhforCklhistNOENzErsstyzxypxjkrCijhijpotfostefijbh4bhibh" + "34de23454bbhCFghfoadrsrCCFmndersst34otlmtuuvvDfofgghhiijjppxxyyzzEENMNLM" + 
"KLJKDJvDuvtuefmnABnqmnkl12deMNjk23ijEN344556highfoottuzEuvvDIJDJJKqBABzA" + "zEENMNLMKLJKDJvDuvtuotfofgghhiijjkkllmmn67HI12", + "IJwxtudeklclvwijjkhivDghkllmfgbhijNOHI12jpGHhiijJKMN4bghmnbhnqefENjkpx01" + "adzELMzAfoMN0ayzkl4bjpjkotdeadKLklfg45xywxqB4bfovwuvDJghtuLMhibh56ENotst" + "zEfgrsghfovDOP67rCuv78MNDJvDbh4bIJDJvDefJK45DJABFGdelmpxwxEN56344bijklvw" + "vDbhjpuvDJtuotIJjkkllmfoNOuvmnOPlmijkljkfofgghhiijjkkllmmnnqqBABzAyzxywx" + "vwuvtufostKLmnuvJKvDHIIJzEDJENpxHIjplmvDrsklhiijjppxwxvwuvtuotfofgjkyzst" + "footfoefGHfoCFuvrCJKzAABrsijhiijjkkllmmnnqqBABzAyzxywxvwuvtuotfofgmnghfo" + "CFpxlmotjprCFGMNzECFHIIJDJvDjk455667788cclkljkjppxwxvwvDDJIJHIGHFGCFrCrs" + "stotfofgghbh78cl563445yz233412234b01bhhiijzEjpKLxypxjpijhibh4byz342345zA" + "AB56126778898cclkljkjpzAyzxypxjpjkkllmmnnqqBwx89", + "ghfohi34ijjppxxyfgAB8cghhizAbhijjk78jpeffgghABhi4b23ij01jppxyzxy67jpuvtu" + "otfofgghhiijjppxwxklLMst4556yzMNENzEyzclxyjp120abhhiotstijjppxjkwx4b8c01" + "jpjkyzklJKKLLMMNENzEyzxywxvwvDrscladfoIJjk34wxuv89jpbh234bNOhistdetupxEN" + "wxjpfg0ajkad78bhHIkl4bGHijFGot34vwrCOPwxlm67mnef23yz12zAvDCFjp4bqBABzAyz" + "xypxjpjkkllmmngh01DJdeuvzEyzxymn23jppxbhwxvw4bvDDJIJfoNO458c5634otOP4556" + "jpijjkhiHI4bij67bhDJ4bfgjkkl78wxfoghjk34zEhi23yz12tuzEvDDJJKKLLMMNENzEyz" + "xywxwxIJpxtuotfofgghhiijjppxwxvw233445DJhijk56foklHI67lmkltuvDwxjkpxotfo" + "fgghhiijjppxwxvwuvhighhiijjkklcl8c78675645342312010aaddeeffgklDJJKjkGHvD" + "DJKLvD89JKyzjpHIIJFGHIpxzApxjpjkkllmmnnqqBABzAyzJKKLLMCFstMN", + }; + + solutions["Small random tree 1"] = { + "12:34::", "141301", + "0214_01", "0214130113", + "130114_134", "1302140113", + "01130201_02", "01130201_123", + "01140201_134", "130114_01234", + "141301_01234", "01131402_0124", + "13011401_0123", "13140102_0123", + "0114020113_023", "13011401_01234", + "14011301_01234", "011301020114_134", + "0113020102_01234", "011402011302_1234", + "020113141302_1234", "0114130201130102_0123", + }; + + solutions["Small random tree 2"] = { + "1:234::", "01121413", "12131412", "12141301", + "14011201", "121401_02", "0112131413", "011214_034", + "1214011301", "141312_024", "011213_1234", "011413_0234", + "01131412_134", "01131413_013", "12011301_012", "131214_01234", + "131401_01234", "131412_01234", "140112_01234", "14011413_034", + "14120113_014", "141201_01234", "13141201_0234", "13141214_0134", + "12011401_01234", "1214131201_0234", "1412011301_0124", + }; + + solutions["Medium random tree 1"] = { + "12:456:3::::78:9:", + "68160102_48", + "010215231667_15", + "026801160167_28", + "1502231601166716_02458", + "151416230201166816_345", + "230223011615671679_235", + "6801166716791516_16789", + "23010279671667790116_039", + "0123022314166779_012345679", + "23681401166701026801_02468", + "1401021516681523677901160167", + "011615681416146779_0123456789", + "010216150179236716020102681614", + "160279016723022301161468151615", + "140116687902012302162367_134689", + "156701021679230102231468_023579", + "670216687914166779011516_056789", + "6716791567140116670214_12456789", + "022368160102681614010214_0123458", + "15160167160267231468790167167968", + "16236714790116011567026801161502", + "0201792302672316141514_0123456789", + "1601156716020168141623790102016714", + "161467010216230115681668_012346789", + "1614670116670268796715011602236815", + "7916672314160215016779671668_12349", + "010216150167230223161401_0123456789", + "0102152301677967160201026814_0234567", + "0102160123671602011568167914_1245679", + "67140216680116147967682302_0123456789", + "010215672368166701027967162301_0123567", + 
"161523016802671679016716230168_01345689", + "23166802010279166714167901236814_134689", + "1601026716236715796801160267791401166801", + "167901681614020167162302681523_012345789", + "67147916676802230115160201230201_12345789", + "0216156701230223146867796701166716_02356789", + "67791602146701142302231516016779671615_012345789", + }; + + solutions["Medium random tree 2"] = { + "123:::45::678:9::", + "575834_47", + "34030157_047", + "3558353435_58", + "350335025669_05", + "01033558575669_17", + "01346956035735_149", + "35033556353435_356", + "0235010301355835_123", + "0135560302356957583435", + "033435025756693501033558", + "570135340302585669355635", + "56585735033403020334_03467", + "35560335010269565734_013478", + "0201033503566957343556_01457", + "3401695669350335583534_13459", + "57020301355658033435_0125678", + "350203015658695657_0123456789", + "350356585669563558340301_03458", + "0158026957566903343534_02345689", + "03566958573557030201_0123456789", + "035835023403566956573557_024567", + "343558030156350369025603_014569", + "0201345758036956576934_123456789", + "03355801693403575635560203346902", + "3558566957030134033556026903350234", + "356957345635560103346901_012345689", + "02560369355758563435035669_01256789", + "3501563457355603346956580257_1245679", + "56690258573503345635566901_023456789", + "02033534015658350357350356_0123456789", + "343503583501020302573534566957_024578", + "560203340235586957563503010335_0234679", + "5635695603350103580135573469_123456789", + "69033501035801355734355669023503350257", + "3557340356356902035635035602_0123456789", + "3556580334350103573534566956350203023556", + "35566903350257033556356901030135_0123569", + "35563403355602576956033503695802_01256789", + "5635030103350302586903565735690301_01256789", + "5635690334025603350169035835030158_0123456789", + }; + + solutions["Large random tree 1"] = { + "1:2345:de::6::789::abc:hijk:fg::opq::::::t::lmn::::rs::::", + "kn149h69469k699h688b46_1hn", + "2e671214699i46kn689k9h8c_7en", + "kl9k9jcq2e8c684614122e14_elq", + "1514468b6801461214132e8cco_05b", + "co8c68kn699i9k8a46km699j679k69688aaf8a_46imno", + "9k67cp8c696867159i9h14it46co8c68or8aag6968cq8c_578hkp", + "os8c2d9jco6869468c129k8a8b149h46km696701af9i8ccq_bcdhjs", + "9j9kcp8c68cqit9i69682eos8c12cokn9k8c681469or469iit699j_ejpqst", + "9i2d120114it4669139kcp8ckm68698c9k68kncoos9i9k9i8ccp9kit_04diknpst", + "68kl2d9hit9i468a9j126869148aag468a9k14680115698c68cq8aaf8a_0468dhijl", + "os6912co688c8a67kl9i69142dco9j688c9k6915679h46146946699horag_2579dhils", + "it9j8akm69469k13681401698c8akn9iagco9j9k68468cor68af69co8c9i8aos684668_" + "34acfjmnt", + "6946129iafkm142e68018aor1368128c9kco8b6946689hag9j678a8c14688c8b462d1369" + "9i68128cco8c68it159ior_234679bcdefijmr", + "oraf468a159h9k149iag1269knkm1468it9j694668149j8ckl8a9i6869co019k1468698a" + "6869af9i46688ccoos6846co14_024579fghijlmot", + "cq8ait149iag468c691446co129h699k1446688c0114152d46cqkm2e8a8b69149j124668" + "69af149j46149k688a68af69688ccoorco_1268abefhjkmoqt", + "691246kl142d9j0168698cco8aor689h69689kit8aagcq8c68461468122e8c6869os469i" + "14it69689hcp8c6846co699kkn9k6967688c68_024678adhijlqrt", + "8cco12682d8c698a689k69klcoos4614afco9i158cit9k6846coor141201co8a699i6867" + "1369469j14cp8cag696846149k8akn9k15699h_23456789abcdfgilop", + "8a688cco69or9kcpknaf68138a468c682e12co14os699h8c2e6867km8aaf8aco9i689k8b" + "689jcq8c699i68it46149i699k0115km1446148aag8a_345679abcefhjkmpqr", + "9icq688b2d4669688a8ckm469k671468coorit8c69klcp139h68699iit4669149k8c46co" + 
"8akn016814af8a158c68agos69co9k46149j6967468a6846os8c1412682e144668148a0" + "1", + "8c469h688cor46klco699i9h8c9k14460112oskm1469co469k6812138c1415cqkm462d14" + "os9j122d1469689k8a6946af8c6846ag148aag1268462ecp128c6867_" + "1234abcdghijlmpqrs", + "699i699h67688cit9i8a1469469k8c1469158bklco8cagor9k126801co2e8b8ckn68cp12" + "9j6914469k2d141314af6968468c8a6846os149hcocq8ccp8c01_" + "01234689abceghijklnopqrt", + "69469h1468699k46cocq8cor8b68158a69km9j14670169af9h9i682d691246co149ios2e" + "9kit8a68699kkl019i138ckn688a461469129kcpkn688c68ag461413699iit8aco688c2d" + "46688a", + "cp0169682d46149h8c1346696812469k14cqitkl15co9k2e8ckl68698aos9kkm4614co12" + "689k8aaf2e14469i148a9kkl9k69688c8a9i12it9j699j688c8b68coorco_" + "0589abdefghjlopqst", + "68468a1412af688c2ecq138a699h679k6846af14128c68cp69kl9k468c2d140169coor8b" + "9i466968oskm8cco14469k9i14kn122dit149j69km689i4614ag8a6846ag158c14466901" + "68698b9j", + "cp9i1469468c8a149j68or12cq8a2d142e699h67699k4669kmitaf1413468b1514468c68" + "01co12or69os14468c9k69689kkl9i699kkn8a688bagco699i8a68itos9k699k46148c12" + "8aag2dkn", + "698c468acpafco146846129i159kkn2e8c14or68698a9k1301ag4614it6769os8bco8c12" + "co688b6846km14or469h129j9k2d12699h469i9jkl1446129k69469k_" + "0124568abcdeghjklnopqrst", + "8a01ag6869148c9k124613itkn8bco14152dkm8c6867692e9h8b8a684669or9icoos8ccp" + "68128c9k6968149jco694614128c8a9jcoaf68or8a6867af2e_" + "0123456789abcdefghijklmnopqrst", + "8c681412co698b8c682d9i46691415cp8a68699h12462e69ag9j8a688c466914it466769" + "os0114or689k12469icpit9k6968co2d148cafkm8a469k68kn46km8a_" + "01245689acdefghijkmopqrt", + "8aag01689i8c8a69co689horit6946af8cco8a9kkmaf6914688b9j9i15os9k46itkl1468" + "8c46686946co128b2e148c9k4668kn69138c68cp9k8ccq68148c4669146768129k461446" + "2d122d138c", + "6814699jcq8c68co139k8c9h6968cp46klkm8c9k67kn14688b69689i466901it14469ior" + "679kit8a01co1215kl6914af8c4612or14os682d12142dco8a4668148a8cos2e122e14ag" + "468a688c699hag", + "co13699k688bkl469i8cco68144615148aafor69129k2d468a8cit68cq69km01kn9kos8c" + "148a9jcoklos9haf46699iagcp46128a2e8ccp128aafag9kkn9k142e4667699i122e12_" + "23456789abefghilnopst", + "6968af9h698a678c9j68co8c9kor46cp68km14699i122e4669kl14af469hit688cag68cq" + "129k699i1446699k01688c142d8b12kn46682d018aco46681514ag1546688b_" + "0123456789abcdefghijklmnopqrst", + "1468468b67699i68698c13co8bit6701148c9h684612144668698c12knor2d68co8aaf15" + "8a68698c149j689i6968kl468c68cq9kcpos8c686968klco8aag8a6846142e1201152e_" + "012456789abcdehijlmnoprt", + "689k4614698c469k9i68km8ckl01cq8cit141569469k68699j8a8c6869coaf12os9i8a68" + "67af14ag8c8akl68agco699h462dor8b1469139k8c9hcp46144668462e12kn1446122d12" + "01_012478abcdfghijkloqst", + "1469159iit688c4669680114co9k8ccokl67136869oscp8b9k688a14kn8c46co68km8c14" + "orcoor69cpcq688c12cq461446699haf9i68122d129j69688a9j689k691446688a14ag12" + "8a6846142e121446688a2e9kaf", + "68kl46698c9k2dcq8b688a8c144668128ait14km01co8c6846co6814orag8a68co8caf69" + "469j9i14122d1269159h4614466812462e8ccpco121413os14466768co8c6846co141501" + "or46_0134578abcdghjlmoqrst", + "691468itco9i699k8corkn684614cq8a699j15122e8c46144668cp8a9k2daf461314km8c" + "466769it67kl9k13689h69688c8aco8c689iosco69688c8aagcp8a684614011446688a_" + "123456789abcdefghjklnopqrst", + "9h689i8a6946148b68678c0168co9k9h694614kl8c15orco9k68or46km8c699k1469469i" + "12kn2e689k1469128ccq688bit682d12468c6814129j8a2d46af8a9i69149k8c122d1268" + "8aag148a68_012345678bcdehikmnqrt", + "9k1446691446kl129k14461368km8a9i9h14af69688c9kkmcq462eag9iit1401149ikn69" + 
"9k688c67469j698aklcpco1415kn46or148cco469koskn9k9j699j6968121446688ccp46" + "1412_01234568abcdefghijklmnopqrs", + "9k8c9icp14co68os8c8ait69km12cq9k6769kl9j4614coos139kkn699i4668149h8c1246" + "af682e144615699kkn8b8aaf68148c1246agco2d126901or14co8c688b8a68468aag1401" + "1446_012356789abcefghijklmnopqst", + "698a46689k46146701138b8akl8c684614af8a6846158c12142dcq68af8c68cp12692dco" + "9h462e699j9kkl14os8c688ccoor12km698c9h012e688c9i9k699ikn9k46co67it9i8c69" + "68699kkm8c_0123456789abcdefghijklmnopqrst", + "678cco144669688b8a12699k2e1413ag6869688a4668kl9i9j6946af8c14it68469iit69" + "9k688ccq8a69688ckm14460114co46121468os2dorco698a8c469k69af9k688c69kn9k69" + "46co141501kn_0123456789abcdefghijklmnopqrst", + "67144669689i128c1446151446co688b692d12682d2e149h698aaf689k698cor6746co9j" + "itkl9i696846149j128a2e8ccq1468kn46ag9k688c1469cp13or6869019kkm9k69148c68" + "46698a9k14kl01_0123456789abcdefghijklmnopqrst", + "689kkl46698a8c67149kknco688b69136846148a9kkm69kl129i8c46692d14it46af699j" + "68678c01cq8a69144614699i681214699h46682e148c12co69461514os014669cocp6846" + "co14151446or68co8c8aco_012456789abcdfghijklmnoqrst", + }; + + solutions["Large random tree 2"] = { + "123:789a:fg:456::bcde:hij::::rst:::klm::q::::::nop:::::::", + "170103355b_7ck", + "lodk5d35dllnlp03015d1a3536_kno", + "5b17as350103355d1adm02012g02_7bs", + "arfq17012f031a1802366h03355ddllo_7qr", + "19arfq0118031a35175ddl2f02033503_5789qr", + "022g03dmas0219015d3503195d011a0135at_279gms", + "35as18020134ln195e03021a17366hdl5d5b_2348ns", + "as1aarlpfq18dlln1a6j365ddl352f5d5c0301035d02011a01ar_jnpqrs", + "011a0218ar2g01031a3501175eat03025d35dl1aln5basar015e0301355e35_" + "0278aegrt", + "35atdl015d0334lpdl2g1a02366h03355das03dl355b34351a1901366ilndl1a35at_" + "134bglpst", + "dl5d35dk366j5d6i34366hlodl033602ln03011aarfq355d2ffq35033501dl1adm020118" + "at_06hiklnoq", + "6h36fq18dl2f6jar016h355dlo1a34030135dlas035d1836dl02352f1a366i3603366jlo" + "0136022g_48hjloqrs", + "025d2f013503lp5d180135031a5b02at352f035e2g0135dmdl5c173603022f011a2g6h36" + "fq35dk035ddldk01351a6jln36at_0123678adfghkpt", + "010203dk2g01341aln3635015ddlas6j36355d035e02dl5c2f2g35fq36lpar5b6i36dl03" + "351a5ddmdl0335loln5b01022g020301_13456abcgjknopr", + "036hlo352f02fqdk5ddl2f0335015b365d03at6h35fq185c6ilpdl1a01dm1aas03013503" + "175d3435011a020336025b6i2f03fq01192f_0245bcdfhikmoqt", + "5d5b18ar03dl3503ln011a5elp0335as0103172f02dk5d350301dl2f1a5dat022g5b0201" + "5c35365ddl6ilpdl5d355c031a350103366j_025678bceflprst", + "dl01355e17031801355d03lp1a36020301dmfq355ddl6harat2g02011a2g5c0301as196i" + "0301ln3436355b6j032f6h022f5ddkdl355ddl35lo03011a18lnas", + "0102at2g6i03dllodl17ln5d5e181a01as35dl03dk025d2f34lpfq01dllndm3603010335" + "1a035d01dkat03355dardk035d191a01180319355c36_01234589deiklmnrst", + "1aarlnas01355c0302dl19181a2f2g355d36lpdl6j01at6i020335175d01lpdl2f03dm35" + "195dln0103fq2f36dm025e1a6i2g011a355d035b355ddl5d35_1356789abghijmnpst", + "2fdm03351a5b0103ln35dl5dar0203lo34dl191a0301ln03355d0318fq015e031alodm36" + "atas2f03022f01dk036i171afq01363503365d0135365d2f6j361a_" + "03456abcefikmnoqrs", + "03350102185e5d0335362f6jdk011aar17at5d350301dllofq025d1a013603ar356hdlln" + "19012g365ddm5b6i352f5b03022g345c5d35dl01035dlp01asdl1aaslp010334dm36355c" + "366h", + "1a0118355c02015b6i0335as5ddl36lnlp355d6h36351a17dmfq2g022f6j1936035c3402" + "011a02dl5d6i03ar01atln2ffq1a2f033435010203as35015b2gdm18_" + "4589abcdghijklmnopqst", + "356hdk366jdlln035ddl35fq02lo032g5b355d0201dl032flp35175d1a36dm01as03355d" + "341adkatdl0119351a0301ar355e1a5d5car175d5b0203dl35lo5b3503_" + 
"0123456789abcdfhjklmnopqrst", + "0135ln18fq012f3602dk03365d2fdk35fq2f0203dm016h6i355d5c1aar5bat3601dl0301" + "343518361alo5d19dmas350103dldk35016j1a3601022g025d03dl3601lo6j1aat1a_" + "125689abdijkmnoqst", + "015d355edk5d3503dllo01ar5b1a0234033401at3602171a0301033503185b5c01192f02" + "03362ffq3503as1a2g6h01033519ar5ddkdmdlln5c365d0235365ddldm036ilp36036j02" + "dl2g5d3635365ddl", + "175ddl0103355c1a01lpas1a030235035d2fdl35ln5d6j01dl03ardm022g35035d195e01" + "1a36345c350301dk366h365d0319lodllo355d34022fdm35dk011aat366j36351a01_" + "0123489acdefgjklmoprs", + "355blp192f35015c03fq356i5e6h36355d0103dm1a013517036j1aat02352g5ddl35ln03" + "5c1a5d355d011adllploas1adk0335dl011a5caras023534035d350301180103355d_" + "134569abcdefghmnopqst", + "5d355b03dm5d012f18dk173502015c03366h1901021aar032g01fq03366i021a35365das" + "0302dl35lnlo0103015d351a5c6jdllolp36ar5e34at2f010301351a02035d5e01dl1903" + "5d355dat03lp022ffq", + "01ln351a0302ar2fdm345c01351a5d36aras03026i36035bdl35015dlolpdk036j6i192g" + "3602356j5d2gdl01031a1801at0334lp351aas03170103355e351734355b5d5b5ddk_" + "012345679abefgijkmnoprst", + "35365b2f5e6j5d6i3503fqdl365c01ln35031734015d1adl35at0301lo022f5ddl18lp35" + "6h015d1902360302352g0102dk5d01dkdm03181a01031a355d0301ar1adm0103dl363503" + "as011a0103356h5dasdlln5c", + "363503at5clo6h353601035ddl3502lp035ddl2ffq01355b02035d36026idm35031a0103" + "2f34ar021a01355d1903356j03360103025e172f35as19365d6jdk5dfq3503011a0103at" + "35_0123569cdefhjklmopqrt", + "36010335as1a02015d0335ar1701dm175e03352ffq18015d2f5b181a03356idm36dl025d" + "352g2flodl5d6h34355d03dk36lpdl5d6ilp3501031a01at1a012g5c3503011aas1a0103" + "35_02345678bcdegijlmnopqrst", + "5ddl35035e355dln01dl19lo02012g0336186j5b355d010336172f0203011aas6i363503" + "015b1a5c35dl6har03lp01345d0335dk1a5ddlat36350103015c5d355ddm1a17366h_" + "0123456789abcdefghijklmnopqrst", + "356i022f1a01021703lp19012g5das35dl365ddklpdl5c351a5d0335ln025car01asat1a" + "03as5eat01dk6h2f366jdm5d343502fq2f18dl02fq035e01180336dm6h355ddm5d173534" + "_0123456789abcdefghijkmopqst", + "as1a5dat01fqdk1903as013635ar5b185d6h03dl351702035d35lo011adl2g36lp6i035d" + "ln350301025bdl36dm6j5c35032f5c5e355d366j35dk5d02342gdl03lndl5d355e011734" + "_0123456789bcdefgijklmnoqrst", + "011a35ar03025b5c5e5d35dl03020134dk2f5d3502012f1703011903dm366hfq5dlo6j01" + "03dllp360118032gdm2f02dl0103355d342g1adl01356i3603ar6i355b01at1aloat18_" + "0123456789abcdefghijklmnopqrst", + "355d350301dl1aar02365b6j0301ln35fq022f5d031adldkat01lo031alp3602356h365b" + "6i5c5e355d035c0136dm355d183503dm0201352g02dl5d35dl0301ln34170319010334_" + "0123456789abcdefghijklmnopqrst", + "5d01ln2f196h18033501dk365d6hdl35035dlo5b1a02dl35032g6ifq02dkar2f5c350301" + "36036j1a5ddm5dat6i02350301fq171alpdl35ar015d35dllp0301355ddk5d3534030117" + "0103_13456789abcdfghijklnpqrt", + "022f0336025d011a036j012g170335dllp5dar02033501dl1alp5d36at18dk5b35lo5d03" + "dl6h01lo18355d03366hln355e5c5b6i1901dlar5d1a1903as3635011a015d02032ffq2f" + "dl02as03366i_0136789abcdeghijklnoprst", + "03011aar34355ddk0301023617lo03011a35at5d03dl355d01dm366i3536ln031a6j0136" + "6h355ddl03lp2f5b35035d02352fdl03fq355b2g02ln015c5e2ffq02351803355ddk5c02" + "19182g_0123456789abcdefghijklmnopqrst", + "35345d025edm011a2f03015d3635dl03ar025d2f1adlarlp5b2g36016hdl3503360102fq" + "as036i183501361a03as022g5d355c2f3601dl023503016i2fat1aat186j36355dlndlln" + "5d5b6j_0123456789abcdefghijklmnopqrst", + "5d02012f35033618dk6j5ddllp0135fq175d02dl340335ln015d1aas2g0301355b1adl5c" + "3635025dln5b032f36dm016i03ar5d351aar36036h025d0136loat1adllo5dat03363519" + 
"03010319355d_0123456789abcdefghijklmnopqrst", + "5d0103dl1a35025d5c012flp6hat341a02030136dlfq03196i0136356j5d36036h35lodl" + "1701ardm1a5e035d02363534lnas010335032f5b01182g015ddk035d35035ddm020335dl" + "2g5ddk3503011a1817_012345789acdefghijklmnpqrst", + "020301341736355e03016j351a365b03355das2f5b020118035c2fdm2g6h353601035d02" + "2g0135dk5d1a6idlar03lp011a035c01fq5d35030134365d19dldk2f030102loatdl1a5d" + "0103at18352f5d36dl_0123456789abcdefghijklmnopqrst", + }; + + solutions["Large random tree 3"] = { + "123:ab:9:4:567::8:t::efg:h:cd:jklm:::qrs:i:nop:::::::::::", + "fqhphnfrckcj_kpq", + "fs019f0229029f03fr3445_1hs", + "031b1a9g29022903349f46fr68_3bg", + "68461b0103340301cl46bc681bbccm_8bl", + "hp7tcl46bcah3403011bbchncjah1a011aah02_67ilnp", + "cj68hn469f1bfr01299f3403bc1b019g02291b9fbcfr_8bfjnr", + "34039f0229fs021b451abc9f01fq9g1bbd34ah1b0302299e_45bcfs", + "7t4734hp4503011a3403ahhofq1a469f02340329fs9ffr011b02290102bd1bbc011bck9g" + "bc_56pqst", + "036801cjfq349f1b45bc02cl29019e021b03291a3401021b45ahbc1b01hncm03021aahhp" + "_23458bejq", + "9ecl7t349f4703021bbc3429466801bd03fs9f1b0102bc451bcjbc013403341a01clahhp" + "_0245beflt", + "fq9gah1b68460301bd29bc349f020129461bbc1acl03fr9f020368ahfs0129bc34029fhn" + "294502_08bcdghqr", + "031a011a34ah4502hofs9ggi03299e0201290229033403469e1a9f9gah021b29bc02cm68" + "9ffs011bbd_345aeghis", + "45ah299ffq9g021acjho2901bc02039g1b344702gi9g299f022902fr299f0129021a2901" + "bc1bckah01hn1a03019ffq1a022902bd_0135679cfhjmoqr", + "bc347tgifr1b9g1a1b45clhp4603bd1b68ah46bc029e9fcm9ggi011b29bc020134030147" + "ck341bbc68011a471b0334cj7t450301bc0334bd0347_2458abcdiklmprt", + "1aahclbccj291b9f01033446fsbc0268291acjhn1b03cm9fah020134fqhobc03hn472934" + "039gclgi019f1b02011aah03344634030102299f020102_02469afhjlmnoqs", + "fr9g451b01029ebdgi1b01ck039gcj2934bc1aah1bck469f02fq0302hp29ahgi9fcm01hp" + "02bc29011a34021b010229fr03bc1b01cmbc1b03cl349f29cj_01234569bdgijkmpqr", + "ck1bhnbccj1a9g011b02giah2901027t9g1b47bc1afr1b019e9fbcho1b1aclahfsbc03cm" + "bc291a340102461a039g68hogi9g293402469f299f030203fs299f2934_" + "03567abgijklnoqrst", + "1b469ebd011bahhn1aahhpbccl03bccj29014503341a0301ah1a0203344729ahhn9f34ho" + "03gi2901frah021b017tbccm9g1a2902019g1b299fbc02011b1a29029ggi9g29bc9ffs9f" + "_045bdefhijlmnop", + "0103cl9g02bc2934ck1b4703gi34ho0102ah0334299fbc46451b291a013403bcck1bahbc" + "cjbchnfs1b01022934479e7thp9f1aah291a0103hp34fq0302011b29bccmbc471b9g9f01" + "_0234579abefgiklops", + "1a2934030102ah9f46ho34fq032947gi0234011b039g297t9e0229bc030102cm9fgifrcj" + "4534290301681aah029f01477tfs34hn1a1b45bc4603clahhn341a1b4601hpahbd68hp02" + "031bfq01bc03291b02ck011bbc02", + "030134021b45bccm0129039f1b01fqbd1a021b01bc299g9fcl02031b34ah46ho031a01bc" + "ckcjah031agi34461bfqbc9gfr4703ah29fs0268hn019e29029f2903029fah1a34ahhp03" + "ahfr47347t47cj340301021b1aahhn", + "ahbc0234684729019f031ahnahfr011a023403299f0246019e342947hp7t469g9e02011b" + "bcahho4703cl29fq9f29ckfr020301hp3403cj46ck6801bdcj1b0102bccjfq47299e2902" + "01bcbd1a1bbd1b_1245789bcdefhjknpqrst", + "46039f9g0134681abcckahfq0301hpbd1b1a024634gi2947ah7t9e011ahn03013402bc29" + "9f1bah9ghocmbc1a1b010229clbdah9f1b03bc1bfr9fcj010229bc1b03bc1a9f01gi34fq" + "fs03453403029f29029f470334fs03011aahhn", + "9f011a0229031bfs34bc9g7t47011bbc7tgi9fclck0203ahbd0201293402011b0301bc03" + "fq9f1bcjbc2947349g1a0129022901gicm9f1bahhpfs46ah9f1a1bbdbc681bahhoah1a34" + "1bbc03011bbd1b0103_012346abcdefghijkmqst", + "ho9f0229030102fs1bbc29349f0345gi3401cm47fr1a7t03ah01341a29ah034602019g29" + 
"1b02bccjcm01hn477thp1b9g9f689e2902bcgi9g01299ffrcl0203bc293403019f4503ah" + "fq1abd01ah9f290201hp_01234579abcdfhimoprst", + "1bbc011a03ah3401ck1a46031b0134bd0229bchp9g1a0334ah1a0145031b340301hnbc47" + "02342903cjbc1b3468029f0346gi34fr03297t9f470168ah021a9g34gifq0334ahfs9f01" + "291b9ebc0201cl291ahocmah1abc01fsho021b011bbc", + "020103299f1b02012946ho34bdbc461b02gi1a9fahckfsbc03470168341ahnah03011a29" + "349f479g29027t01fq29hnhp9ffrah299fhp021a1b299e019f46bc34020329cjcl344701" + "bc1b01bc9ffr29cm4602_012346789abcdfijklnopqrs", + "1aah03hp341bahcj1afq46bdbcah1bhncm0102032968ahbcckcj1b9f0234030134477t03" + "bc1a34ah0302cj29ho01fs9fbc03fq45340302ah29034668469g34030229fs011a021bah" + "01bc02hncl9ebc29021b0102_2346789abceghjmnopqrs", + "9g29011b9ffr03bc34010203gi1a1b9gck29cj01ahfs1a9f03020102fq1bbd2934hn9e02" + "ah1b0147037t34ho47031b46bc011b1aah68cl1abc1b01hpah0334bc463403011aah1a01" + "02299f2902033403022945_0134789abcdefghijlopqrst", + "9f031b46bc7tcmfs01340302681b460129ckbd9f340368471a34fq9e9f0102ah1a1b45fr" + "29hnah1a1bho01hp9fahbc1bcjbcfr01021a011a9g291b0302ahhp34ah03291a9ggi1b9g" + "29bcclbc1b0102011bbc_0123456789abcdfghjklmnpqrst", + "bc1bbd294634cl9f7t68fs019e02461b290103341a9ebc02hp9gck0103ah1a3401hn4703" + "3446ah031a01021b29bchocj9ffq01frhn0229011b9g45029f013403341aahhpbcclcmbc" + "47ah1a1b7t01022902011bbd010229_245689cdegklmnprst", + "01349ffq1b29bc029f2902011a1bfrcm9fahhn1abdfs029e29ahhnho029e01461b03bc34" + "1acl03011b479g1abcahhp027t34291b020301341b9g1agibcck1bbc1b4701037t4534cj" + "bc03011b1a45bccjahho_0123456789abcdefghijklmnopqrst", + "9f1b341a29024501fr9e2902ahbd9g291b031aho34ahbc9f1b460145fscjcmbc1b0229cj" + "019e68471aclbcah03hp1b02039fah291a9f34474602cl7t03013403ahfq34477t029f46" + "2968hnah021a011aah02299fhn_0123456789abcdefghijklmnopqrst", + "01471a027tfqah0301299g34469f0334681aah0147021b01hoahbc291a02fr0301bd3445" + "cl1b1agi9fahhnckfscjah022946bc9f021a1b03290201341abc032934fqah9e477t0229" + "cj02hp9f9g2902ah011a014702ahhp299e_01256789abceghijlnopqrst", + "34fq9f290201fr9f1a29ah02019ehn1a29029g0301fsgi299g1bgi0229ah01hpbc9f3403" + "4734bd03cmbc1b011aah02cjbc1b46cj0334hnckhobccl030103453468032902011bbc1a" + "ck299e1bahho0103463403011a1b46bd_0123456789abcdefghijklmnopqrst", + "46hn01cj02ah1b29019eck02cmbc291bbd9g1a0102ahcjhocl341a29ah1a01031b01hn02" + "29bc9ffs9e3447cl1b9f01342903340145341bbcfq1b01036801341ahpah1a01hp027t29" + "9f01fr9g1bgibccm9gbc1b9f29010203293403_02356789abdefghjlmnopqrs", + "bc1a299e01ah03341a1b4668ck0102ah1ahnahhobc1a01ah1bcm1a030201bc29349f0229" + "cl0347fs019ffq021b0134bc03451b0201cj340329029g2902bd019f347t1b4701bdfr46" + "2902297t9g01bc1bbc9f342901030134461aah02gihp01ah1aah0102299ffqho", + "021a34034729019f1a1b02fr34ho01299e037tah021bbc2934459fcj01fqbd341ahnah1a" + "011b020302hn0129340203469f45fs473468470301291bbcclhpcm021bah011b1a03bc46" + "3446ck0301681bbc1abd02cjah299g9ehp_0123456789abcdefghijklmnopqrst", + "0302bccl9f3429039f011b34fr02453401bc1bclbdfs299g9fgi2902ck9g01029e1b2902" + "010334bc9g1a46ah01031b684734fqcjgibd03bc011b46bccm9fho34291a03bc1bbc1ack" + "01ahhp1a0103ah02341a014702cj299fho1ahp_0123456789acdfhijklmnopqrst", + "ah01299fcjho29029g01031a2902fsfqahhn019e9f291b9gfr039fgi0201341a460334ah" + "03011bbd471a0334031b02bc7t29019f03ckhpcl453403019g47fr021b45cm0102fqbc29" + "9g1bbdcm0201gi1a02ahhp1a010334453403011a_0123456789bcdfghijklmnopqrs", + "9g2902039e293446014503gi341bbdbc011b03bc0102cj471bbc1b290168cl34031b9ebc" + "479fck021b29bc460234011a9gahhp9ffr039f0229029f7t477t3403011b1a46fsahbccm" + 
"01bc029f2902019f1bfqbc01029f299f02011aahckho_" + "0123456789abcdefghijklmnopqrst", + "03ck291a9eah01033446021bbc01030234cj1b29039fbd01hpfq1b0229bdbc9g019e1bah" + "0345bc1aah6834hnahcm9f4701cl03bc29011a020102ah29gi9g3403291b0229gibc03cj" + "3446019f68fs9f340334472901027t1ahoahho1a0102299f_" + "0123456789abcdefghijklmnopqrst", + "bc9f1b01021acl012902bc1bfqck29011a9gahbd031b0129ho341aah1agi0229hn039f46" + "29fs3447030134030234684501ah1a299f29013403017t02ahhp1afrah1a34471b01bc1b" + "017t0229cjbc9g2946cj02bd34033402299g46gi9fcmbc291b02011b02299fbccmfq_" + "0123456789abcdefghijklmnopqrst", + "34469g9f0334cm0268gi29fq0103341a45bc029fah29011b34bdhn0334fr47013403341b" + "017t034734031abccj01ah46ho02299e341b011a03bccl1bbc9g3429021a1b011bgi0245" + "29ahckbchp1acmck9f0103fr344529340302011bbd01022901fs9f1a29fsahhp021a0102" + "291a9g_012345678adefghijklmnopqrst", + }; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp new file mode 100644 index 0000000000..82bcfa3b81 --- /dev/null +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp @@ -0,0 +1,67 @@ +#ifndef _TKET_TESTS_TokenSwapping_Data_FixedCompleteSolutions_H_ +#define _TKET_TESTS_TokenSwapping_Data_FixedCompleteSolutions_H_ +#include +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** These store complete solutions to fixed token swapping problems. + * This is similar to FixedSwapSequences. + * However, it is different in several ways: + * + * (1) The solutions have not been processed further (thus, we + * do not expect the swap sequences to be irreducible). + * In particular, we did not relabel the vertices of the solutions, + * so they will not be contiguous. + * + * (2) The full set of edges passed into the original solver is preserved + * (thus, we expect more variety in possible solutions; there may be + * more shortcuts making use of different edges). + * In particular, all architectures are connected, so there should be + * NO errors when running our TSA. + * + * (3) Several real architectures are included. + * + * I have tried to include a reasonable range of architectures + * and problem sizes. + * + * Thus, this allows a direct comparison between our TSA + * and the one used to generate these solutions, and hopefully will show + * improvements more clearly over time. + * These are also hopefully more realistic problems. + * However, as noted also in FixedSwapSequences, we must remember that: + * + * (a) relabelling vertices will, in most cases, give different solutions + * [even though the problems are "isomorphic"]; this is just an unavoidable + * consequence of the token swapping problem being hard and, presumably, + * often having many "nonisomorphic" optimal solutions [although this hasn't + * been precisely defined]. Thus, we can never REALLY do a direct comparison + * because we're always going to get small differences just by "chance", + * depending upon our vertex labelling; + * + * (b) Many algorithms involve an RNG and hence do not give the same solution + * each time (although, our TSAs are careful always to reset the RNG seed, + * so should actually be deterministic). 
+ */ +struct FixedCompleteSolutions { + // KEY: the architecture name + // VALUE: the problems, encoded as strings; the first element + // encodes the complete collection of edges (which cannot be deduced from the + // solution swaps because, of course, some edges might be unused). The + // remaining elements are the calculated solutions to actual problems, with + // the same encoding as in FixedSwapSequences. Thus the tokens are given, but + // the vertex mapping is not, since it can be deduced from the swaps as + // usual. + std::map> solutions; + + // Fill in all the problem data upon construction. + FixedCompleteSolutions(); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp new file mode 100644 index 0000000000..d0c5df6359 --- /dev/null +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp @@ -0,0 +1,2197 @@ +#include "FixedSwapSequences.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +FixedSwapSequences::FixedSwapSequences() + : + + full{ + "01120123", + "01120130", + "01122301", + "01123401", + "01201301", + "01201334", + "01231201", + "01231224", + "01233001", + "0112012334", + "0112030103", + "0112300130", + "0112340130", + "0120032042", + "0120034220", + "0120130114", + "0123040112", + "0123120124", + "0123201445", + "0123300123", + "011230011224", + "011234231201", + "011234233401", + "012032042004", + "012304300423", + "012340254226", + "01023413453445", + "01122340120112", + "01123001450406", + "01201301141320", + "01201342012013", + "01203423203405", + "01233412231201", + "01234542466704", + "0112345001123550", + "0120130120134245", + "0120133401130120", + "0120341532012001", + "0123200456072582", + "0123344556784997", + "0123424526784704", + "011203045001033603", + "011223141215122312", + "012013340113012034", + "012013454245012001", + "012032142001201432", + "012320014514014520", + "01023224536553325347", + "01120345240203634787", + "01200123423523424642", + "01203204252607323832", + "01203405263234206728", + "01231234230112050112", + "01233405617658477661", + "01234254506736234254", + "0102341301451334451301", + "0112030104011501601768", + "0112131224120501160105", + "0120132456050156200557", + "0120234514200114452023", + "0120341532012001153234", + "0123400546244740784601", + "012343563567563578899a", + "0123450226246726020102", + "0123454016014017022301", + "0123456772678459453884", + "010234133445340113344501", + "011201234012014540450112", + "011203450116404501173812", + "011223456758398aa6268a58", + "012013243520130113202435", + "01123453626740013485405329", + "01200304205220565227527803", + "01233001140125302325300114", + "01234564701357012823452839", + "0120324514566745012032785689", + "0123124501653670484565362359", + "01231456478978756a5639b395cb", + "0123241456457572803901803114", + "012345167897070a9b34c2d83e49", + "01020134256271839ab889c6d64601", + "010234015647891895016356344789", + "012330014534062730063823344534", + "012334455062233445017819717886", + "01233456378598a5528bc5d22352ed", + "012345627037898070236237899445", + "0112013453267890a0bc4a34dc538553", + "01203452466573347389977380205234", + "012131456708609434a7b0c54cd908ed", + "01233451463472238395a26934462334", + "01234514607801598060a660ba29a6c6", + "01234564375896451264809645015845", + "012345678479a6b33c67871b84bde79b", + "0112340156702812016356737001129556", + "0112345367859649537134679649538501", + 
"0120324567860158159aab01589a862067", + "0120345245166789017834201689011667", + "0121304567895a8bb60167307c14457567", + "012314523678233973a818b7170117c5d0", + "0123245615788056396356150180784739", + "012345465789a0b37b5c13c8d2ed188f18", + "012345561781691a3001b530cdbccd63b5", + "012345677481983aa95269b6673a0b1c23", + "0123456786945ab282c8bd2ea205f6g52h", + "010231014524450256452473310102455618", + "01120301451265768934952a62b00996b09c", + "011230450167855934ab72c6bcaba3344585", + "012101130104567895677849040182785613", + "012331040104235647622304180104477931", + "012334456057234512018660011223893445", + "012343456237384209a49b4338c41401d0ed", + "01234350360783950652363a06b1c00661b1", + "01234543362723879a7a87b97c9aad9a797c", + "0123456157055890abc405bc1dc4e92490f9", + "012345678347797ab772239c303de1010ffg", + "01020134453463763458453463761701298229", + "0112034345656785198a650bcd8decefe14019", + "01122343506789753a50ab67c6bd1adeabbdc9", + "01203245642016012016786457894578641657", + "0123345260235246728495a1608bb7017c84b7", + "0123432546678440960ab460460cd760960171", + "012345462789a4ba2c0d4ce08fega4hgage58g", + "01234567689ab7acd74635eaafgfhi9h4fgfha", + "011213042556708912a701709b7cbd2d12017025", + "0112345605017578799a5a6b3c05cdc006de6bcd", + "0120345672356814560187341449142001144920", + "0123145062730123735062382314466223955038", + "01232403500678910a0bcdeb8fg279bchci050i0", + "0123341523567847788015011592348078239234", + "0123345240652378018917013452237852658934", + "0123405467871867622367870140547918871801", + "0123425430678419234284a3b6bcd2ea7eadea42", + "0123454657809a0ba0ca837d0e45f7ag5f5c78h5", + "010223145672786756599aab909a90ab3c0323cd3d", + "0102324564781659ab0170ca4b285a644564160170", + "0102345676808930a801b5cddeebb5150130a3a834", + "012304356270833904a6b883cbd38e4d4fgf1g64fh", + "012314354670622387146201144662237014870114", + "01232454657883938ab383cdefgeheh56dhf1c8aei", + "01234567389a5245738aa09a2338bc68b296233823", + "01123456702893470185701201472834124701704734", + "012034526270869220a56b52bc4586d8ed866b86f886", + "012034567467834274955667a7abc5d856ed6782a7d8", + "0123145301364768193136a6ab536447a619c3366dc3", + "012314567180011229a1b9acdbbe58fg1gbfdhb6ie7d", + "012314567587759ab94b6356b9631475a887a80120c2", + "0123421453678023018039421475234214a25a01ab14", + "012345652718895945a1b41c142cb48dc81c9d892c27", + "01234565473870451259477045477023017047458638", + "0123456712489a8916679001907ab5ab7a671223b5c6", + "0112230445565778955a4b45579c7de41b954bf4477de4", + "012034042056178291254aba3b5c59411da74a417e826d", + "012123400560705805019a5bc621d2155e210fdg059dd2", + "0123124035562374014089013512231235685689013540", + "012340125606789a398b23bc56c5bc8b566d782cc5e756", + "0112313415261278591a300b3031155972121ac62cadbe6f", + "01203104456417317879476479203a23b66c4db620044d3a", + "012334562347120134857923603447563412011260233456", + "01234205567689a9a4b5cb9d635638b589635638635638b5", + "012345526445676489a8bacd209389ea3c231e3c011ecd20", + "01234567890a02b0c42d6eafd57gh867834506diaddii606", + "01123024011224304563574501863024630112302401864530", + "01123415617186961abc7c716196d91a3e93d93ed8bf8bbfd8", + "012132405667895856a767bacda721696ec5056717212005c5", + "0123144235145667018056798a23421480018014428a233556", + "01231445677896679a78015280457814966778800114455223", + "01231456758297490a6297823bcdbc3b537597cd014d141e56", + "01232423516378393ab3cdc7973930ce7f63c79cagce39b37h", + "012334056783935a5bc6d901e87f7030g5988gg5hb2i925h67", + 
"01233423522365471601783465892352782347342347526578", + "0123345675685648977a753460568b1cbad275c26001d28b97", + "01234156789a36b0674c0112d42336ed01a55e124ca5c00123", + "0123435267879abacb4b242dd9791e02011eec4c2d525f9a50", + "0123453678837964183683a2274579b4c0badb91a223ba1801", + "01234556304514012723456830564527688968561445566889", + "0123455667710180369abc671d71041dd9c81d47718d80bc67", + "0123456738429a8ba26823317350a6688cd738239a42e44223", + "0102341445567348901ab7c43431cd3cecef7e7331b7b9901a02", + "010234534667891578344653673446786789019a78899a786746", + "0112345067014528503412013950348645285034120112288667", + "012013245678933ab815c0757d07207de0eff178930778cee0c2", + "0120341501466772200115587293346746150167349372200115", + "0120344534637617630158203445345863922001761776012092", + "0123034056721867890118893a23723a9bb5233ac5ad3ab52312", + "01230456785952a2b03cd385e5ef590g3cfh2c7a2bb0ih900459", + "0123451260017280579a6b39c87d1201d67d576b23129a45e445", + "0123453416455778236223579001453423166216230134455778", + "012345367581911abc9de7fb0235b075e7fbf58ggah81i45911a", + "01123440015312657834792a865340346512014012783453658678", + "01211340520637890ab2c02d1a219efaeg9e2d9ccbgdchb3cb4ccb", + "012313456407589ab669caa0011deaf6g6f56550cah7010701ifh7", + "01231456467081691481a82b352346c2bd6935e89b14a869465770", + "0123435667829a9bb44c2cc0cdebb4fb36bg4243362ha24cb44ceb", + "01234551643784499a49b3283cc9c59de3b25923511fge0171e78h", + "01234560748498ab088c14deaf4a2084agh6e574i7744aag0dde0d", + "01234567780690a7a0147b301c01cd4e78c49f4gcbhc14i8cbb8i8", + "011234145114617678839a4babc3035d656003344ec334cfc334cf14", + "01200345466207382359456a2b59202bbc9d90036bbe3e03466aafab", + "01212304056276047829abc3d04ba9294e4bfb4b5gh274784e05d05g", + "0121324565047859a61bc601d04604455e0f46d06g67a60f2f3278f7", + "01230234356001787395a9abc995544d54951901be60f65b54g4545b", + "0123133456732338989a297b3c8d8e23f23c280ff50f01170102287b", + "0123245240637624381946a77663a701bacb24d55276a77624400146", + "01234256352335176880011756796880566835564a42233556684a80", + "0123451627283942a8bc23d4b82e8fb8d1gb080hbc1ihjgbh11i7ihj", + "01234516375023011645894a9289681601455001451668894a922337", + "01234564783739a6809b5c16c937bd78375c8023e201ed807823377c78", + "0112343567869097671301a1bacdbedfdbbee2a2gf3h35fi866001i1figf", + "0120130120345675137862495601756220011356346256499a1320012013", + "0120300456781983304a20bcde2401efg2f44h242ie5204j012i61bef4ef", + "012134567618395a84bcd234ed21a3d25ab20b3990210b18473984184721", + "0123145062014714859847a850018550147b471485c0d101a8dc3d2385dc", + "01233456789a3523b6c8a7da8178d2eda7d29a6449ce0c2364ce9a35a7e7", + "012342563723378905abb442c45876611de0233723427661055823378723", + "012345361401700145582314451401708258239770019a45140145587097", + "01234536786714452301785289780a014523361467362345527823364501", + "01120130435401261201304330012667125485266712013098438554854398", + "01123043540112302643678501120179302667265412430130435401122667", + "01201342563556748701206956011301422042744201133513562001132042", + "012304567826390abc7de23feggf2h5i572jkfle0m8cn42h0o3c04pqp70ri4", + "0123340560672894abc9ade37f3623b7bd7f470gb07047hf34e3010g944i94", + "0123345067869a19188223b3bcdb083e82fdf8abag08dhiadb3jb3abdb3jia", + "01234235642342503523426476180164421823500189351801895035234264", + "012342526768759442a0675b0623a0ca8138a07581672306a0ad42de941d81", + "0123433515467487907a6b084376cdc06e869f6g086bhci0909ff787c1hc84", + "012343541678975aab01c93a82d04d0154233a82e578825a97233a23827897", + 
"01021345165713894aabcd4e94af453g9h2i02134a0a0113aj0aaj011d940102", + "010223400225511678393a23b4ac400240bdb4402339955195a88995517816e7", + "01123034546578970a8b4cd84d71e4d8f74ccg4c400a8h9754e4e97i9i788j8h", + "011231140563378760909a2bc9d6639a04057131327ef7g4142h1214g4ag9ac9", + "01201304015016477829a2200b1c04d747ed5029f5517gd7dhdc781c167ic77i", + "01203142536782791a010320b2821c67363d8e31e32f2053036720gb2h82i7h7", + "01213234567869a6a76b781776ac6d17a71ef1gh17f1a7iaa7j878j8a7haa6ac", + "0123345267138923a1521b3c857152cd2334e4341b1c9f6gch7c677c3c344icj", + "012340567418450112095ab5405a188a5a8c18bd6e56b51223bf74g7gbbf12hg", + "0123451670018770166287019816624945872301703a01162362162301708798", + "01234536478279027a2b1cbde2b4238bf88b37477ag5bd5d2h237i0237232h7i", + "012345564574689a8ba88c28d28ef59ag2569h9a6856622a9a498e9ig2j5f574", + "0123045670808952a20a808926b75c6d52e5453f04g02304266h7aa21i6d1a3f7a", + "012341567892a916b323c7b39250db0dbc3423503441eadbbc92a9920d23eab323", + "012345367286233608849023a9085b4586611c3623729c233661d7362372231c36", + "011231145674789a741474b7ab9a1cd36e31ef744c14gceh7e4e4ci7d374i74cgc0" + "g", + "0112345301677001286746671270017067124628344689281267017067014612288" + "9", + "0112345660015675561248233412600160564889127523120160567a755660017a1" + "2", + "0123434005167890a4b10140cd1d97eaf98egf0190f5gfhihaea8e789790hj5bh55" + "b", + "012345162578982a8bcde2fgfc0hhf0ije4d13jkil45j3m52c7minjo077pq5e2iri" + "k", + "0123453167389abc9c4d96ed6f4g0fhi0jk7jl7ml5n1jn47j33oaipqkd385q0g0r0" + "g", + "0102131425066778092a0b6c6d7e67066f0106020b6g2h6d6i0j6k060jjl7l6i0m0j" + "0m", + "010223455678897a09b77cdb0eb2fbgchdihcjj809db89b7hd78090k4502b2dbd47c" + "b7", + "01202345067270085259a5b7bcd02052e5fefgg74h59e5e4h353ieedie08jiiffgji" + "g7", + "012034353647342832494a4b3c344dcebfcg3c3h3i2jcg322j344b3i3k4l344m4nn1" + "3k", + "0120344565710819abb7cb329420dc08ae71b7cb8a710820ae1932b74565637194b7" + "cb", + "012034563701809abc61d9561bbc630b3ea0fafgahg663379a61ahh33eh36iah61fa" + "8a", + "0123141526700183595a2b2c6de51526f1243fdg6df15h15ihf15he5dg4e3f24262c" + "83", + "012314305646748369a2018b143023744601c830012314dca24656577de557e57ddc" + "c8", + "0123144564767837792307a04b0114011cdcade04f0g0114h8644b6830c44fc4c601" + "ic", + "012324025605576869ab1c0d5eefgf6g066h2i6j02016k2i0l0mek066b0d6ngo6g56" + "68", + "0123241456767890014556a91476453b900182233b82cd78bec79078cd823b233b82" + "85", + "012342251671577889578063013ab416ac63d0ed4201b425163ad00156d06316633a" + "ed", + "0102034567899a9b1cdefghi1jjk9el6mk894mg5i6mn8f01mohf4m07pqik5q5k4r45" + "ps4r", + "01201342015420013613366701855420422054853601130120425498855442982001" + "1336", + "012302245006075859585a9b9c8de9900f2geg5e588h0h240i50j48k588k0l505m5n" + "0o5a", + "012314356726233078496ab0c1678dec788f081814c4e6262330g4h68i6718h68i14" + "jhg4", + "012314456578794aab45659a79c7dcd234aef114g0343h4a01cg34144a010ig079aj" + "9a79", + "0123214135365768791335a5b20c01d323a77ef635ca575657f67egaha1h7ia7aj21" + "b21h", + "0123456474454883943a312b231c313aad1ce22a5a45afgbg25a4h45adi6aj5aaj64" + "4564", + "01234567869ab7bc67d8864202e0b767f65fb923a5b7gf14fhbe5fa53i2302e0beb7" + "fhhj", + "010213453567622482913a5b35135c9135d39e1fg013dh4d2124d3dh099e1f5c5ijd" + "d5jd5i", + "0102234506478729a90a0bb42c2d2eef7f47ag2h0iae02ej0iekalhm2nhn5nhmaeek" + "bk2dal", + "0112324154126741899a2a93ab8bb7ab12412ac6abb77db7ab2ae4124112e42aabb7" + "6768c6", + "011232454626748087092a7b6cd80187243274166e466f01g27b5b45h326326fg2i0" + "099jh3", + 
"01213453566227423430014892a0ab538cdaedab1ffgd3344101282153h50f018i8j" + "27188j", + "012304562789ab8cdefegfhgh8i0ij0h8kilmlde0nmo6ogph5h2h8h2qrd7aq0sm205" + "qcs2at", + "0123200456712801291abc0d4e717f7g277g50252h0dij0k0l0m4m201j010k0n1j4e" + "0c0noc", + "012324025617899a7a17010b9c8d8ebf0g8h8ij686088k0l088h9h2m9c0g0n0b0502" + "2m0nbf", + "0123245006576879a323a35a572b6c56576d0e5f7g570h5ii0566j56622kik5l5672" + "246c5f", + "012334405667189a0b3c29d23456cedcfghiiddccee57jh99aa040747j23gj01hkhj" + "gjfg1f", + "01233454063475895291a813892034ba6c0691896c20cd52a87e5475c86c06522006" + "6c52ba", + "0123454657689216a068ba8c01516816a001231668458c347d23455745ce342c232c" + "347d45", + "010231402516744668598ab75c250274400225597416d93193311668ed8a1631d9ba" + "93d93116", + "010234032567829325a5a47bc43dcefgghi6j534kfl56ci1j5mbcniopeq78rq87bij" + "kocsmskm", + "011234564785079ab9c06cde9afgaf9ag8b9bhb747i44707jiik56c001kd121i016c" + "c0016c1i", + "01230451677834894834ab268ac6cd67783eef8a34b5g0043423chhiieef519jc151" + "9bb59b9j", + "01230456175839abcd0be83afa7g0d0hh23i58cd0156833aab0b0117586j56586j83" + "3aki9k39", + "01232456407368159a566bbc235d568edfa6cg5d56288eeb6ba69abhbcci8e732j28" + "8e0d2jk9", + "01233145262728929a7b2c929d3e3fdg232h92944i273e2j9k944l9m9n949d4l9445" + "94922jnf", + "0123456478895a3b94459c7c785a944d0dce9c944d0f0b17egce7c1701haa2230b3b" + "0b23ha0i", + "0102134501611708196a060217ab2c1d272c0e0f6a01061g1d2h6i022h1j6k011j0e" + "61ld4d1461", + "010230404563278902a9bc8d89393040e427f6018gcabca97h5i810127638g403002" + "274045e45i", + "0112343556781970011a78b7cb8378dc70b7cb3e83788f01703578g07034b7g0gh83" + "3i3578b7cb", + "01203445611301077890a38b1334ca38a3dade07f08ga8h48i2d2034f16106h42djf" + "f1388i13f1", + "0123244526271891a92a2445b001cadefe8f18fgb0012ahi6h8f230ijcca01188kk3" + "232aij9118", + "0123340350678192abc1ca01c4cdc178e881014f23eg3e232hi5e8bheg50hj34gk23" + "eg3eeg34gk", + "01234254603623421760016017783660238917014217545a78546042362342546017" + "5a01176036", + "01234564574589a323b36b96acdeadf289gh90a3238gib89a3b36b96908ga3dehjgh" + "egdeada3ib", + "0112343506036789383a0bbc0d063e3a3f03011ggeah3abi0b06033j6k0l122m3m03" + "671nbi121n0o", + "012324567889a6bc8556da9ec5065685fg7838hd4i899e38eba33ja623daa6k2gkfg" + "0f06a3244ik2", + "0123405464781891a90a4042bc185d547e78187eef0b405d91187g7edh7i78180bhj" + "5d545ddhhj7g", + "01234546784824297a6b4c2d0edfgh5i4jd101kg1l9ej1mg4j7n46no7hn8apmq0rm0" + "247aapmqqs6b", + "0123456758459a2b5cd8be8f2g27a08b585chc3h232g67218f5cfi2758f0j6672101" + "f08f585c9aik", + "011230014312546530016743542889129a2801658930120143304354126528671201" + "43300112432889", + "0112340305067837395a053b3c011d0e3f033g9h0i0j5k0l1m0jmhgnjo0j03399h01" + "1d0lmh0503055k", + "0120034546782092abb5c520dee7788c2d0341203f4g46fh03202did6i46454g3fhj" + "fh3fk3deid1kde", + "012131045267809a01793b3121744cbdef3g3121ah3b3152ij21k5fk524i747921h8" + "9a79744ik5ij31", + "01230405200627890a7b4c202d0e2f2g277h2004274i207b0j2k2g044lgl2g7m2720" + "04287m0n20288o", + "012345067628971aab578cdee3355fdeg9970h01232806id3j35fk5f0d35fk232835" + "id573j0ja88cci", + "01023134356067281902608ab54c3435d0ebb5ebd52fg628603h310119023ijhkj4c" + "3101313hjhkj7k60", + "01023431516576466894a576b70c9dde6f01cg51hba546i20bhb016f4b0ccgjf020k" + "0ccgfa949dgdcg0b", + "01123425016789a0b75cd104c8e212d1fd6f6g2bd167255chi6g4h6jif6k67122bb7" + "676kk33404d1fd9b", + "0123045678795a5b1c7ded9fghidgjghkied4l597m9m4n4lon2p6pqa8o7pon5a5jpc" + "5r50594i4n4ijs5j", + "0123245678917aa001786bbac9d4914923e3fef5566bba4gd47h4i7173e3gj7hfe3d" + 
"91e3d4fe4g494i3d", + "010213010425676879676a060b6c6d06be0f0g020h6i060f0b2j1k026l6a2m016n06" + "1k027e67060b7o796n", + "0102342352654748970abcdefg7he66h7h47g9e6028f974854cdi5deib8f52i5jibc" + "544754i5jikj1k97g9", + "0120230405062738232920042aab3c2d0edf202g05202aab230h2i203j23dk5l0e0m" + "055n20055l2d20dk0m", + "012034355626178901a32bcad85de4f535gedehdfia35djg1i34f5a3cageikfi24ik" + "e4d8degejg9j202420", + "012343540506076869060a0b1c022def688f6806022gghdh012i026j0b061k010206" + "6jjidl2m2d2n1k2e2m", + "0123452627787980ab780caded3efg2327eh807i4b7j7a3e3keled8mdbedfn3e3opf" + "oqersfo5453k8fpf3p", + "0102034252647502528742967552a8877564a84b4252c5bdebcfbceb0252gehiehi9" + "96643j030252c5gejgkc", + "0123142536200758190a2320255b3c230d2e202e0d0f01192gh9250i2j1k5l25200m" + "2n5l0f0o2g0119h90o1k", + "012345564714478769abb5c827d8efbg8745h00114ia47fc2jc88727875dd88747b5" + "2jdk5ddkb5ab3iiab556", + "01234567879aabcd5ec4f3ghgf23ij465k50lbjml04542nmi2fogf5e23gjci5mjmgf" + "5pfo8jcajqcrc88s89cr", + "0123456789a8abc75de7f15gfh4907841d2ii9a24c8ja8ak2f9lfha22ma28n86ao9p" + "aqgo8jfp4045ak869lfr", + "011232045056075008595a505b1c015dae1f5a500g5h5iaj5a011fif5i0kblam5a5b" + "50bl0k5h0n50bo5bbo500n", + "01230405560748591a5b5c05070d5e595f5g0504022hhb5b2i5f0dj90k5l4m595l05" + "en5eeo050k01en04014m1a", + "012314561728498a3bcdd92efg055223h3igichj4kfgig565f0hil012e145m0n051i" + "ioic8dcd01ic058d5p5f5p", + "012320042506370829232a9b292c20bd9e0f299e3g9h0i5j250k5j4l2m20230n040n" + "209bbd4d04202m3g9h4lmk", + "01234567524558944ab9cdef944558f538b9gaha8iijga58f5gkj08hga4a8i948hef" + "dbb9944ag6677efchagaha1h", + "01123415678939a5abcacdecefgahi1i15a5gahjecakcaeclelmlehlakin6e1iin6o" + "e001ecpdcdecle8l801q4h4634", + "012001314536785739a28bcdbdbe8145fg782031df018b785745399hhijkk4455778" + "bd8b81bedfec9h31399h310120", + "012032145601207083965a56b153c88332c8011996261de996262070f7015g70f7hf" + "id7j01701d5a5k0k707jb1kbde", + "01201301045165170187174951047a17ab5cdcdeeff220017adghif27i499j49khdc" + "deeff22004499jichidc87ichikh", + "012342532342647687893aabac8dbeabfagcac203a428hig20236442gc9f201j0120" + "235hk53aab895hchgcigeibeab9f", + "012134526706804996abb8c8c7cdc19e21f2cgfh6f96fi9ec767c7c121f2eacdfh06" + "d3346fcdhjfhc8cdd33eea6f3406hj", + "01231425167289abc61de8af018gh0077ei86i7je8c66khlh0017e8m1n01h0haacc6" + "6bboo323p2p96kpq2re87e727c8m2r", + "01123241561273689ab03ccda54e01f2g7eh12fi32f24112b041i99a32fi4efjf2k4" + "73dkg756i93ca5cddkk432f2fi4eehfj", + "0123451206074819ab5c4deffg74127h3i7445485c1j4k23c6123i232l061m0d0n9n" + "fo1f1beflp124d4e1q4rab3s1fjdef1q", + "012134526276896ab1acd55276e66a6221bfb1766221b1b3344gghh8899iij34e689" + "bfb3b1017k07j0344gghh8899iijj0017k", + "012134567832397aabb0c6ad018e343fab0gah7ib0ij7a83hkh4blbman34213o78ab" + "0pbmmqbmb0hkbc8ras7a3f788rckanbc7i", + "0123045067895aab5c0d6edf0g028f0df73ha33hiai88j5agkkl5cimkn8ogpon8oq4" + "gk8fkn2c02040g2r020ddf0g5i500s505i", + "01234156789a1b8cbd500e78f37gh4di0223f35f5j019k5l1m1bbd7ddi411mbk9kbn" + "1bh4h6299op97p7g4qer504101501b4qbs", + "0112343552626787904a186bcdefdee8cdg55283hg346i673501j31252g5fk52e801" + "12iccddeefk990011252g5hgah4a34j37je8", + "012340525657899a0bcdc09d1e9f9a9g01fhfi9f54d49dgj4kkldmlnlolp9gln9dfq" + "r8dm1o9ffq891er8rsr84kk3235285r8rs9g4t", + "0123454614278289900aabcb27defghfhif12j9kkl1m0nnd3d232gfg2j01909i0n9o" + "0acp82d5dq2c8289crab9snd822c82899knt9s", + "01234567849a1a7b9cd267e1f6389g5f67hi01j0ik019a1a2301j0cd0h450138841a" + "9cbj9a1a010hj0hi9c0h38j0bjd27bcdd267f6", + 
"01201341567617016869ab76170cded4416fe54g56hi1hjdd44k41el4m4n4m1hno4n" + "papqiqhi0h202r4r4nnoloele5asdeatpteae5jd", + "012034453136012072813431902a452001818b90cbdef872b2aghf2aag81b2gicbjc" + "5j45343181f8k7cdcbag722aag36b2gicbcdde72", + "012030435467282030799a9bcdef4e499a1a010c7g06302h7ifj67klef43mn3n434e" + "edcd06o57g2k52206p062q2krlkl2k2006fs2hhd6p", + "012345462775821597a00127b997753c454dde1fegghhifj82j882151f4d454d2327" + "de15eggh4d45754dde4d150115454ddeegghhi7527", + "01234550670894ab9c2daefgahhii7ajhfh04694fg4k9cdl94ahem9a94n042hoh046" + "2mhi232d2mipemil4k45422q01n0erhoaeas45aeer", + "01234567891ab4ab7c96cd89defg2845e08967011ae05fhf7cih96cdde2g455f6789" + "fg2889967ccddee00145hf23ih89ji28fg2gfg2889", + "01234526789a5bc50c7dce0f97gh9h92i25i0c948j92bkc5al5bc5e8cm9a92n00ccm" + "m301ioal0p8q2r92n9n0n992i2io8o787ds18j2r0t0p", + "01234561728950a5bab84cbd0edfg5a445500161c6baa5g57hbdb8346i08jk5jcl3m" + "4c3n6oapc6a40e4qbrbaa4454c34455jjk8k8scl3nb8", + "01022304501637859a1b5c019dd050858e5f85egehdiej8ed2gkl823mn02ob2m011b" + "ob5olal96p023q6r163s01hnehd0022mdi5fht6regehej", + "011232434526078969a81bcde10f011gah0i6j8k5a01al12emfn0f01e1ae0obpbqhq" + "pr2c1be13e3201bp261b12cd2c2668psah010ihpahptal", + "010203345067899a5bcd29ef9g5h9iji4k1l4l011l4l4kjk3jbd5b5mn5jo5m022pmp" + "5mbdf03jfqef29rhecr35snen55hrhr3e6netn5se8ne67e8", + "0112034356372860898abcdeecbc6b56d17fdgh56idj0k607l6l56h5h443dmd1dm4m" + "43126i7f3n017o1p8fjqdj4k407f138f7olo3nnrd1lsdjjq", + "0123145067789ab2cd01ef1f01964g6h65c0bedi2jb2ab966707c0ace4be4k6l1450" + "cm8nc08o78678pcq7r505701b582bsc0b282018n7rcd144g", + "0123242325656778890abc3d8d4e5b1fgfghbgb7ghbg0ihjk0011fgfbg5bh92l6mnk" + "0i8o892383h9hp892l3qh9hj897r78838o3s836778833q7r", + "0123345652233778899ab0cd68eb6f8991bggheiejb8k4jl344mnleieobgejjlnl3n" + "3ppdhqk48k684r34phk44r3p37hq7bks8k68kseo5o56677b", + "012343567282729a23b7ac7223d94deb437fg59a72fh03ig4j4323d901hk4d60d903" + "ac436072g5ci56b7604j037f23fh7223hk03607facb756g5", + "0123455678484595a9abb29cdefe9fa9ab4ghih0044595cjf7kelhfe7m2df79f3n9i" + "befe9f9ccj23bohp3nb2hqhpbq4r7s2dd72d487848202d4r", + "0123456728093a9bc02d958e2823cf2dg42hi0ij4k3ldmnodc8edm9p6ccg6qg4idc0" + "cgdcr22didgog4r66n2hcg4sg4cg6c23r2trr66cc04s233l", + "0112013234567897a93b1cdbae5e05f0199g12h3i67jhk7h5l2m7jdndojd7j571232" + "h3ahae5e053p2qh39h19dn5r57782m8m7857565s3plt1c5llt", + "012334156735879a3b8c34d4de3fg8dhd94ih1cj929k6k8l1mdhh1010nln8l8ccjej" + "a0gdd4o6672paq1r15a2233515d9343f8a4s1raqd48t878td9", + "0123455653300778981a1bb89839303ccde0fd01gh5gij075k5dfdlfl223m4ln2oip" + "ficilf1j4lbqlb4l5rl2bs1b1jijfilfbq3053454ll22oeo07", + "01121345671859ab069c7db97a7e67644ffg4hih2i0106a2hj459k7lab2man4f1o12" + "45bpb959454hhqiq2iab1o679kirdp7a4f2m4h644hhjihhsihit", + "01123456577008292a01305b3c8de3fecgfhe55a0idj57dkhl08h1124m34hle33nao" + "5b3c30cg2a122k8d08dpaoaqdj1r7sh1h72afefhh1122a7s6gaq", + "01231341546718904a41bccdefecbc3b2g953hij13010k36234a366lgjcm06ln9k90" + "2g2obc6l13ap3b95232q4e1316234i4aar4e4als6l36232qctcm", + "0123451607893ab4cd010e357f2004eg0e077fhf3h35581igj01ckc101044l04eg0e" + "044558bmn9b4bop223ck45gq5r5889n93n23p2pbb45lls8g585l", + "012345466728591aab14ac451acdedefag14354546727e723hfi1a4j0kfl46km4n7o" + "23k6acef67jp724jbp7e011a4601ef5qfr455qef46677eefflk6", + "0123456758393a4b535cdcede47f71ag7f7h9739580ibibjjk8k58533ldllmin5cl0" + "0o5p7hb2b69qbr399q01bi4b3l5s450t4bl05s2l3lbj0t3ab2br", + "01122324565267482419a08b12266ccdef0e01fg9hi6efaj0e1k01cl486c12244mkm" + "0e1n26fo8p12a0cl19521201a024cqer5s52e548cf8pcqfoet2452", + 
"01231452316789a8a49b23bc1475de52f6678931a83g2389efa414h0013123idgjak" + "ci9bbcci3g5223dea475idci14bcf6677552233114899bbcci3ggj", + "012324145617809a3bc414177d23edbf240114acc41401fg17802hi93b23g5680180" + "bf012414fg17bf7j170180dkkii99aacc424233bbffgg55668800117", + "0123456738591a9b2c979d7e9723fe9g7e7001hi3ij3ej1kjlgm6n0o7e7p9gej6e7e" + "706nej0167fqhr2seh1tf6233i2326677eeh702s0o0197fq70979g01", + "0123341536789aba4cd3de14343f0d36g71hgi4g2j34dkd31ld9b823m09n78d33414" + "01eoded36p46m04g36d3d9b9dk7q7r789sbtb96pb878g77rgi4g141lbt", + "01233453621778126296905a8bcdce8f7g2h1e1ijdek7812233llccmemcd2h62cf8f" + "3n9o900553236201ce5pekeq1e3n8b8r17is1i177geq48tg4g485g8r5p", + "0123421256574819741ab86cbde9fef001951g7h56e79ifeej4k0l7h19f0lm424n01" + "0l6blo95mc015plmlqbrmc1g120l01199i6b6c0l6bbr12e774482s42e7", + "012345675847894a422b455c42211ddefe3fg3hghi581j8k456k7l0mj8g20121g2hg" + "hi6n0oopjc1j010oopap4a457e6736131d6q8r368k0ssc0mg3gm0m016n", + "011231043563676896900a01bccded2e1bbccdedef312e1gch12ei2ee7ei0j1bk27l" + "011b2m0jnkmobc2mbp31cqcd67ed8r6867k22mmo7oe7edcd1b31cqnkns8r", + "0123145460017893a0761b0cd7efa0ghigji01bkclm4gfmbcn0cgo7c017pbklq0eim" + "54a5a8787pi2climbr1b01br0e0cbsmbm4a4m4imi22393ata0nfef0e0cbs", + "012320451637238352792a017bcd5ef8823g20hf37h40if3fjhfh44cc8823g20cd23" + "gk7l3g8mgkcn23525obp1qcb5r7s7b4501c8825r8m7scn1qcb4cntcbbp20", + "0123345405066787890aab9cbd8ecb9c9219f1bg06hi3i239j89fk8l922h89926mf2" + "23673423f2fn0n0aao7o87899jhp5qr0h505hpbphph505bgsf010af1sfsr", + "0123345647892a3b2334614c68566d2eaf2a2312af616gh51i2efj348k0a6156af0a" + "lmfj5n0168nonpno8qob6dhr3bh534nlgmno4sobnl343b344cob5nnoob5n", + "01234556789abcc7d91e8f388f3gehh4789agbida20f238f45d9jieh01389aa21eeh" + "0kd901230fh49a0kidd9ideh1e018f786jjiid9aa23ggbbcc70f011eehh4", + "012324143560470849011a9b01cd8d82e2ef6f60015gbh49ci5j2k1l24499bmb8mbi" + "cn4314439oe3bp2kbi1qao3ce3dr148s3543cibi49141qqtst8s8ddr355gcg", + "012324453667489abc9b239adaedefbc9g3hi290f950iji2fif99a5k242345ala2da" + "d4dm5n450o36232445ph673h906q36233hphip0r50a2daeddm2s6q4524459g", + "01233425467889ab8334cdce4fgh6c8g23782bbi34236j5k25cl3m23462nbo70788g" + "gaab2b3m4g012nga6c46ghap1qabbr2b341s1ttj2334466jtj1t2b5k6cbocd", + "0123344565768939a7bc7b34de9f904g39h33i9fhjh37223344g0k8l90h339900b7b" + "a7hmgnhj6gho8m7dbph3hq34gr7ba7hm8ms8saa7766g4g34hogricbp7d3iic", + "012345611778693436a72a6b1c7dce2fcgah61gi23dj1k6l178m34361c8ignlogi8i" + "4p348qgrcg5j4p7ddj5j851cgn36696l3623366117a74s451c6lce854s8q2f", + "012345674821499ab936b7bc236de21b8f48499gb99h01eij449ekeili9amaen1oje" + "eipi8p483il64l525q0q011ooamaj4en0rs0sjj44ll6363i6t676t1b011bb7", + "0102034516177829ab0ac13d7e57035f7g177ghijigj7g573djk0a40782lm45n4502" + "aj0a40mcc88hhi5o8pjqhrgjhi572s0201187ggjjihi57hrctc85oct8pab2ljk", + "010234256076789ab3ca5d02eb603476247f25g602605h2534b37624gi8j6k347824" + "765l79b325028m780nno60pee7bqebo878e7peg6600ngi8jbqor5lnsotnootns", + "01203456723528693a8b5c560dcdce2f6g355c3hi135ij8k20727l5c34lm7l013n72" + "288o2p7372cq28562001cd0d8r28i1725i738o208r5s5601288tij28200dcdcq", + "012304356748902ab3575cdb0435462335ef5f35136g3h04ijjklk0l04466ma62a1n" + "23132o3h2ab30e48a82ai29iijhp3hb3iqirbrdbd9is900e9i90et0e909ietis", + "012324563623727849abc9bde9fgfh8e9iej9aakab2b8le9ak72jf2maniop79ieq78" + "4ee99aab2b8efgahioc92mcrej4s4eanj1ftjfej80j178ft8e4e4scscrp78580", + "012340565787798a8b87bc7dce9f24797bbg577bbcch0h0iij6j388b5k45hlfmcecn" + "24bc0h239f5ocpfq570iij6j565kkr9r79bggshs0hcn9f79bg7b3t3b3t7dbggs", + 
"012345675849a97445587b67c6d6c60ce01ac21f017ghi7b6j0ce049h2c2ck5l2mc2" + "a9hicnc6hokpbq7bbh2oc2c6dr74d6spdr7b2mkp45c65tck74c6677g6j5l49kp", + "012134455657892a1bbc6d42be21bc0f2gc8eheihjbkbeglcm0c45eh8n1o5p2q422g" + "bkcm56bepi300f1bc8bc1obec8gl5p2qpi45eiehgr8gc8bcbehsehbebcc88ggr6d", + "01234225126789a96a06011bcdedcfghd47i674j1889a96a42edebbc4jdkclmjbn4o" + "121p4j424o2qg042mjmr4jbcfm6s67mt1bcddkmj4j42mtebfi7i6706g0gh122q6s", + "012345166718896a9b161c1889424def9f393g239hi2djk4l09f2301144ddeef9f39" + "23i22m6niol09bip1c18i2cq1c16e618ir14es8gkd2di2ittg1814de2diokodj2m", + "0123456357256896a9a223346b4c576d8e5f9668634525237g7h5f6d4i717bi0bj63" + "6b2kbl633m340n0171o44ii07b0piqbl25577g4i45254iiqoroso4i0iqrqiq4ii0", + "011234255603725801698a9bbc56586d1e563fbg8h256958ai8a3j0301123j1kkl9m" + "9n56ao6958259m569b25122569568a5825729b1k7p8a2qr77212bgas258a58252q8" + "a", + "012034322565474829aba38c322de7ef6g463432255h5ggeai474j7kela3em6nofef" + "gee74746bg7kofge4jpqgp48or475ggp7q47488c9c295ss1so32emotai5s20255ss" + "1", + "012324252312010676893a7b375cd3809e9f7b257g06371h12ai250j9223fk377g7l" + "3a929m92377l6l06011hdhjno8opdpd323920q806r069f0jjn0qns6rnt5n899e3an" + "s", + "01234564371879a29623375b45cb79bd64eccb8fbdg05bgdbd96450179ghg037ec64" + "ia5j5b23a2gdbd015b5jjk23377945965j5bbdgdg0014564793723a2iafi1801gdb" + "d", + "01234567783779678a1bcd5e6f30ad6gah78676ifjik781l678m24233070675c0n01" + "1ofp6f805q8m1l6i2rho8ahs24gh586ght8a5egh456g6fikqk5q45242r0nnpfp6fh" + "s", + "01201343015220657378944394a97b73139cde1fg47b73bd7b73430173chg4i67b94" + "13205265i6bd7b43jgg4jg73527bbdde2001eibd7b734394a9kafk1f0120529ca99c" + "ch", + "0123425046721889ab011c4d23e2e4a40a01188f3gah8i1jek2342a40a3lml2nao38" + "3lmldmdpop18cq1cmr42ml1e4d383lmldm4d727e3gcq7s8imtdm4d7ee44ddm7emt57" + "7s", + "01234567489457ab0ba95c4d929445e51ef1fgehijej1eg1gk4f2lfm16g1gngkko16" + "lp4ffg2g2llp8pki0145ijejqoer1eg1010fgner2ggk2giski23fmit4d92koqo2q92" + "is", + "01121324056787495a5b2cd2efbgdh12d2ai5jjk3k131fefe4499aa71l122c2667a7" + "5a5bbm1ncgaogp1224bggq9q4924120105a78r87a75asee449055bbggq9q49e4se5j" + "gtgp", + "01123243456748945ab5b67c8d7867ae5abfgbg9ae90a601hi8dji4khi457cajjla6" + "b6b5946m8n0hmoaj0p5qa648ajjl8nhijilrjl5aajji5a45hi4k0hlsjlhijijllshi" + "0h90", + "0121034256017589a121bacb0dec0138f2a1bacb0301213803a1bacbecgehg9h8938" + "03if0d4j2164f256ec21cba7bacb4j75a7jkec75644j645675a7a121ifgebaeccbec" + "geba", + "01203456274879ab61bc375dabb5ce56012042c4bc0f7gfh2i20cj2i0fgk5l427gbc" + "jma33n20fo0ffh42abb5jpbc27fq20cj0fcrrsc4fqcrjtrsjp5lcjgjcjc442273420" + "gk4220", + "012314355672480901ab23bc3d09da3eaf6g14177h722335562i6j7kkl233d72355m" + "dn7h3d35daom7k725m3dap4o9qda2372ab3d7k35566gdn6rda3ddabsabda56353dda" + "566raf", + "0123341235236227070181985a6b35238c5dce98fc9612cgh0ijbj5k7l6m072n7lh0" + "8o1p127f816q277ffr8ifsfk7f3507hth9ht0t077ffc8c968o9823355kfkcs8c9896" + "6b122n1p", + "0123452678699ab9988ccd2d12c5beb901df982dcghgdihj2k6l9a69mh9m8c787626" + "2dbe8412bnho78mh0pb91b010p7p98bnbqb9mrho9m692669b91bbq2kcscgcskt842d" + "2kkt2ddf", + "0123454012267112010869262a69269b5c6926ad010efeaf2a0g0eeh450g9i0e405j" + "kl5k5j0eem0e6945klgn6lkl5k0ggnjn5j45o4o771699p0e69fe26fqaf2a1rfsad12" + "26699p1r", + "0112342526768798a96b2cd40e019d1ffg7hf39icj34d4dkkl9d26768798a9dk9d8m" + "7nfoep87980eklkqdk9dkra9dk12010eeaep262c9d8m1fa92a126s26atdk2af3kqa9" + "2ccjojfof3", + "0120343546737897a72a209bac9dbe2a4f9bbegd1h9d20346i01jf1j4609gk6l3497" + "5m1n35011n5m5g463446359d09202aa55ggo6pdq5g344634355g6l3rgdds359t9d5g" + "gd35dsgk3r", + 
"0123244550265782249a0b501c456d5e260124821f8ggh4i452j246k5745a49a988g" + "ghhl7man82577mgo26ghmp45gq8g57mr7mhlmrgo576dms507l822445a47m4i017l2j" + "hl5e1fanms", + "01203456137548492a3b1c1d01206be01caf13ghcg1cgi13cj2kcg34013b1cgl01lm" + "cge0nen76b4olpgl561c0q5r75347220011ccg132a20015se01cgl7201lmcjet1375" + "5se072340q2k4o", + "0123455667897a60249b5c670d8e3e30605645dfagd989232h5i247aejake6ej9l67" + "e67a8m5n8ee65645mo8pdq246r677se60d898ee6600ddt9l8p8m89d9aq7aaqdqd989" + "8mmoco5c566r7s", + "01234054677879a40b06ac678d7d8e58541f40fg8h584i1j0140k554a4kl5m1f0154" + "8hnk405o4i1f01fp1f0154400154fq1fa6nankkllrrj1jl26s5mlrltrj1jklnkncnk" + "kllrrj1j0106a66s", + "0123453667789ab2909a231cad3e9a363101909aabbfb2fg0h6i362j4fbfb2237kbl" + "3e0167b2bmmn144ofpbfadabbmmnqn3623blfp7k36ri31ds6i36317t14addr01676i" + "ri0hb27tabb2drds", + "0123450678299a6b7c3c1d34ce3c2329cf3c23g20117069h3i7c3j3423g2gkcl2940" + "3i3ckmk4403423299aab6b6nno0p3c4006406nnq6n6bab9r9a34jsab3jjs349r6b40" + "6n06nt6n060178171dnt", + "0112230140564708791ab49cdb47794eb4fdfg9h1240db4efd47b401gidb40jkb447" + "799ldm4701gn2o12fggj1adb5f1pbq8a01mkb46b56406rgnb447ds6b7tdb79b4ds2o" + "011p4779dm9hb4dbdmmkbq", + "011231244546272839a97a272446b6cbdce07f3g5h46011207fi7f070131399jak1l" + "3m39gn9o39a9ak5k4546b6cbcpdc5h3m0q3146ak39a99r3gak31ed01125s45465s24" + "b6ctcbb64624120qeqedct", + "01231450670896a89b3ccd259e593f255059g76796595001141hbi14aj08ak0l8m08" + "01313cgn4g230oa8080114li4p9b968qnrgnns4gg7gn4c4gnt1hgnnrcd1401144gg7" + "67969bbili140801144p8m", + "01234516789aa770b98c9aa778cd8cde0170b4b97f5g455hb4hi78cddj9ab9a75g1k" + "42cl0mb49a8c5h5n7870clb9a78cdocd01do8c8p1q7870a70m8p9aa701b9b445705h" + "422r01a7429ab4b9b442239a", + "01201345622782980401203a1b9c13cd3e01f5g6454h20cf8iej828kclkmf5206204" + "4h4520133e828k0198hn4h134504eof5hp45cff5454h04hq200r4h0113623s6t823e" + "201382620120g9988213043eeo4h", + "0123201401566748146269200a625620b29c5d014eaffg14690agh200i62fj5601ak" + "0a4l2001af62141m4e0afg20b20a56699cncdno5gpafdqfj0120625do5fr56ds5d56" + "62dqbt20011m0ab220b20aafakfr", + "0123404516017840097a9b0c098440ad8e1ffg9b848h9i0140jkk484787ablkm1fk4" + "ad0940k49bfn0978j7jkk4bo9ib2014084dpad7a789b2q094084fr9b0c1fb2as4001" + "40848e7a78as84409b099b408478", + "012034256307204816921a6bac16011d20013e635f3g9hhi1692202j019h92hk6316" + "1a2034169hhl25m04n631601ao34201a63op01aoqm1amr48qmosm092q9aoot201601" + "1a20259h5f922063mr01ao3e1a012092", + "012324450678925ab91b2cbd1efg3g2392b937ehdi6j697kal2392249m45926j37an" + "690106691e5a244oep2324bq924524b91b1eehbd7ref929mb92392b91b1eeffg5s24" + "3745247r5t452324b99224b9bd455sdi", + "012334561701896abc5dae18566aefg56223ae7h0g6iej01177h347k4l3m56g56256" + "gn230gg5eo3m56016aae0g1817626i6a2p01348q6223183417g556pr62g59c7k4s6a" + "at4l89gn7b170gg56a566ag50g01177b", + "01234560761438790a9b5c604501761d5e79bfd2609b7601791d609g0123d2bhhi1d" + "7614794560gj5kdl019g6m602n76799b0160143obp1q01601d76bh79grd2bp4s141d" + "236045d2011d149ggr23604s1q76796m3t", + "01123405677089abc60d67dac8c67e7089af011g67050d9h700183gic8833j4k0dgl" + "c66789c889839m6ngo7ec6c8671g70dpda0d017067aqc6c8899rosdpdago830d3483" + "c81g6770679m01c6701ggo67700dosdac88334", + "01230435167867013916a08b786ca201a0cd2eef01dg16676cch8i1j1678a2676k7l" + "676c8mcdnodp2eeq78018ba2a001pra2166cdpcden01a023noa2a0dpps01dp162e6c" + "a26701a0a2cddp6c6kprpt0116cddpptcd6cch16", + "0123140105607658129a010512013b8c231258018d3e6076f601237g12ah010i60f6" + "fj053b588k2l6m60761201600512f6nfn96m5o2l2pnf12609qf6nfrnr760580160f6" + "n9nf12f62p60n9059sn9nf60f6ftn99aahth9s60", + 
"01123045260114125708934530935a9b2cbd0114ce1f933001939bdgbd30934h9b7i" + "jk1430bl011230lm452c14djjk5712bd939b7nco93cp3045011430932cblcp9b9345" + "5q45lrbllr309b011f93143093010814459b5q7q7n", + "0120130124356758493a9b9c0d3e8f203a24gh13a63a136ga63549gh9i01gj3a6g4k" + "a6gj133a200113244920al244kkm3a35al4k588n6o01352024201301351p20582401" + "4k8q20245rks4kksstct9c2420a63501133aa6356758", + "012324154647789abcc20b24477d3e3fbcdgc223chfi5j01150bbc3ffic224010b47" + "015k1519flfmbc7d9ndo2p2447c2247dbq0b194701fl0bbcdgc21923dr7d0b01479s" + "19010b5kbcc22447nt7d9n19bcdr010bbc3e01199nnt", + "01234506789a5b6c807de40fcg066c78h7ciijhef280780fe9h79k800678fl804m2n" + "06heh76c78e4f28oheci06ipe44q800f80e4h72rflhe78f280e40ff280h7782r8045" + "066s6cci06it80e4heci78h77880066che6se4ciip4m", + "012345365768891ab223c43dcb304e2fc4b2233645680g8hcb3001c4b21i5j4e23bk" + "6limen30b24501jocbc4455j1i8p233d36cbb22330013668cb8p30b24e45iqc4cb4e" + "b2231a1ib2ercbc45s45c4cbb2233001361i30iq360g5s", + "011230456708733069678abc08733d7e94308f0873aggh8aag6901679b304i690jbk" + "gl1m08mn8aao011m73mp3001941m67733008cq69678a7394ag309b6745gr01699bbc" + "677330678a4scq08016994698a4s1m67mt1m01733001737e1m", + "012342356758239a2b79206c7d1e013fegeh67dic423359j795k6c671e42c47d2042" + "dl016c201mc46742en017d6c2bc41e01en236742do4p237d20355q6cdo67cr42c46c" + "203f3567429j0s200s42c423426cotdo677ddo676c23c44p355k", + "012345674078099abccddeb440dfag0996fhai679abcb47j40jk3h09bcgl966mcdag" + "67b47jbcc2b4404509bcjkjn7896b409go7j40677j09b4bc9aagdf0940cdb4bcb4df" + "40gp09c29a96jq2rag9sc2gp7j679a9sbcai9609b4400996b4677jbcjnc2", + "011203344567589a2b3c34455d01de6f031g013412034h3c01gi03458j1g588j3445" + "865d035801gkd93403863445586l8658121g2m125d2n011g3403de6leagkeo34mp12" + "45de5ddeoqeoer0134de033412455dos01de5teo5doq34de2mer125dmp010334", + "012032341546010718698a5b5c15ad0118208e2f32341g46ch8a6iaj5cck15012032" + "5c1534jl180120328acmaj8e341gjn4o34pq325c018aor18018a20aj32js0734466t" + "8a15tq341801328a2001323415aj4o8a5c1801op20324o3432204o01188aajjs", + "012342356789ab87cdef75fg1f1h5gd2d88iij4klmnopqr00hshndtpmdm8u8vwfxfy" + "z1ABjxCiwCw7bDDEE6F6dGn4s3He83pI8icdcmm8vJhfKyLKLM0NGLB0OmrPQMRg4HOA" + "SdABfyfg1hcTRg0Ntp3fdGU2VRhR1h1eeMLM2LU2rUrPWo23taSdOclDOlOccnn44223" + "s3JsvJbvDEbXTU6YalsYok4kuZJ6WoquvubvbXXEDElDlJuZJ6cmtqqmcmcTT44HTUU1" + "1eUsU11eUssY", + "01203456789a4b1cd11cef1ghe01i520j0kjlmjm1gjnoh9pq5hii9e9rstuuvvw6w56" + "x50x0yzyAfef300yfpB4mCCD6w76E7EFFGcuHFnInCyt34Jq3JKzKLLAAfDHBmtMmC2h" + "NOFPBmBQrll3300yzyKzxEhi4RJqsoOF34FPRSSOO8T84by1bSUQDVmDdN0dmdmDQDUQ" + "Ub4b342h200xxEhtohDVetheuva676SWXc1H1ccuuv6v6PaYdNNOEOa6d1E7dEd1y1tM" + "MYaYRExExtRSXcZMIZtMxtxERERS", + "01023456789a696774abcdeafa5ghgijiklmfn0nkoo3opqrsqhtunjuijov3vf3mw6f" + "kftpumxyzxA6x76feBC13D4D74Azz9Eg4wjeeBFGGHIttcko6gG6HJGHGj6ggcpKf3Li" + "iMikkoo221elNcO8C1PQ8PRQHeDS78G6yCPQ67g7hgrhqrqAL61Tg7C1n1jfGjFAUJJC" + "C1JVUJ1TO8yOyC7Cg7AEFAFGGjjff33DDPvTxWhosLLiXYYUUyzyXHPQrhMuHegco3g3" + "gchczxelulMuiMz9opxWsLf3lZjfeljeeljff3o3optpItpK", + }, + + partial{ + "010213_123", + "010223_013", + "01022304_12", + "01120301_03", + "01120304_03", + "01122340_01", + "01200123_12", + "01200132_01", + "01203243_01", + "01230402_13", + "01231442_03", + "01232001_13", + "01232421_03", + "01232454_13", + "01233001_12", + "01233123_02", + "01234142_02", + "01022302_012", + "01120130_013", + "01120130_023", + "01122301_023", + "01123024_124", + "01201334_024", + "01203104_012", + "01232101_012", + "01234564_025", + "01234565_034", + "01234565_134", + "0102130104_13", + "0112010301_12", + 
"0112013001_12", + "0112233412_03", + "0112233423_04", + "01122334_0134", + "0112340425_03", + "0120133445_01", + "0120321454_01", + "01203243_1234", + "01203432_0124", + "01212340_0123", + "01213441_0123", + "0121345365_04", + "0123120401_13", + "0123122334_03", + "0123302342_12", + "01233412_0124", + "0123401201_13", + "0123404345_12", + "0123405246_13", + "0123425451_13", + "01234356_0346", + "0102011301_012", + "0102013413_014", + "0102133413_013", + "0102322450_023", + "0102345363_146", + "0112133413_123", + "0112234035_015", + "0112345054_014", + "0120134245_134", + "0120324345_125", + "0120340142_124", + "0121013004_013", + "0121344254_014", + "0123045215_134", + "0123200123_123", + "0123202301_013", + "0123244536_036", + "0123301401_124", + "0123401201_024", + "0123404301_124", + "0123405425_123", + "0123455667_024", + "0123456324_025", + "0123456476_135", + "0102031401_0234", + "0102032402_0123", + "010203425436_12", + "0102130134_0124", + "011223345165_02", + "011230014330_12", + "0120133401_0123", + "0121340350_0135", + "0123030114_0134", + "0123044223_0234", + "0123200124_0124", + "0123242023_1234", + "0123242024_0124", + "0123245035_1345", + "0123403554_0234", + "0123413141_0134", + "0123413145_1245", + "0123422354_0123", + "012342450667_13", + "0123424530_0134", + "0123435013_0345", + "0123452450_0245", + "010201234534_014", + "010232456445_514", + "011201233450_012", + "011230013043_123", + "011234536768_470", + "012001130120_012", + "012003404520_231", + "012032430156_125", + "012034256324_136", + "012321013405_023", + "012330011435_012", + "012330232401_123", + "012334054534_123", + "012342055405_135", + "012342540554_123", + "0123456426_01356", + "010203011401_0234", + "010203240224_0134", + "010213014001_0123", + "011223341223_0134", + "012013012042_0124", + "01203245643736_15", + "012034130120_0124", + "012103410141_0124", + "012131403501_0135", + "012304010304_1234", + "012334022345_0345", + "012334231223_1234", + "012340344001_0134", + "012341513052_0135", + "01020345011445_135", + "01122301401201_023", + "01122334120112_023", + "01123001304330_012", + "01123001433045_012", + "01123024011201_023", + "01123440450112_123", + "01123452356335_045", + "011234563656_01345", + "01200134153220_124", + "01201301425320_013", + "01203243322032_134", + "01203432011501_024", + "01203445622334_136", + "01210342012101_023", + "01231223011412_034", + "01233042452330_123", + "012330454262_01235", + "01234015164735_125", + "01234563561517_125", + "01021345341301_0125", + "01120301122445_0123", + "01122340454001_1234", + "01123001122412_0123", + "01123453604634_1246", + "01200123202423_1234", + "0120234565768798_14", + "01203201434520_1235", + "01203234322001_0123", + "01203415567475_0137", + "01232024200120_1234", + "01232415122612_1245", + "01233004534647_1346", + "01234256467861_1357", + "01234536647898_7152", + "01234542506727_1257", + "01020131010456_02346", + "01020343566402_01246", + "0120231401205201_124", + "01202345142645_12456", + "0120344562367308_138", + "0120345362472428_714", + "01230234052301_01234", + "01230452055236_02345", + "0123243523355035_024", + "01232456401556_01246", + "01233454500156_01345", + "0123453563678798_034", + "0102010302420203_0123", + "0112010301244524_0124", + "0112300124122401_0234", + "0112345460514786_4512", + "0120032420010546_1345", + "0120130134130120_1234", + "0120340113012025_0124", + "0120345676633448_1457", + "0123434562467646_1235", + "0123454162060106_0134", + "0123456748426015_0357", + "010213340113010501_134", + 
"0112030112430503_01235", + "011223123415122312_025", + "0112343524366035_01345", + "0123401501252625_01235", + "0123404540130113_01234", + "0123423523563556_12456", + "0123456405450223_12356", + "010234014523342302_1245", + "011201300112241201_0123", + "011230013435303412_0245", + "011234300134122512_0134", + "012023040152366736_1346", + "012312234001544640_1235", + "012314253623016001_1245", + "012314500125502501_1345", + "012324155647789225_5093", + "012342302342454223_0134", + "0123452062473447_123567", + "0123456557805893a5_1256", + "010223400256788597_12368", + "010234567047258396_01245", + "0120345640716783_0124578", + "012340563778155662_01268", + "012342504165714898a9_013", + "012342515601513001_03456", + "012345404617784296_01246", + "01023435230102011601_1245", + "011213241201505675_012356", + "01201334252001201325_0125", + "01203245637687588745_1347", + "01203450561735515683_0264", + "01231442142305011442_0345", + "01233456154534782356_7620", + "01234056721868911821_1358", + "01234235567823757378_0248", + "0123425617807215914a_0136", + "01023014014556144501_01346", + "01023440025334366736_12456", + "0102344523344502230102_145", + "01023453467534533475_14567", + "01123445652367342386_03468", + "01123456677819674589_03579", + "01203214015365768765_14568", + "01203442533420677867_13467", + "01203453160501051738_01245", + "01231435600160563523_12456", + "01233042566774235685_02457", + "01234052160137655216_13567", + "012340563571083182_0124678", + "01234525647037237045_13457", + "012345536037284512_0134568", + "010234563750317898_01236789", + "0120324553642076458764_1358", + "01233445416274286709a0_0629", + "0123401201124023561264_0345", + "0123401564710178122901_1345", + "012345367884234992_01234789", + "0102344056074840488548_02457", + "0112013001241253014667_01234", + "01123101450637408445_0123456", + "0112345167870696a56396_12469", + "012003455336722672877298_134", + "0120130104133513460104_01246", + "012034156375877698a98798_014", + "0123024523261401700114_01356", + "01233004353067085635_0123478", + "0123403425167301282634_01256", + "0123423520062335573520_12346", + "0123451402012602278002_01258", + "0123453617788223404578_02345", + "0123454617890542499001_13578", + "01234546704158013170_0123457", + "0120324567894a9bba968936_1578", + "0123421456423556677856_123458", + "012342504675371801865075_1348", + "0123450673805345582873_013467", + "012345143670018098a26b8a_3215", + "012345422360578598a43b64_7135", + "011213010456455718019013_02368", + "011234231201506701686912_04568", + "01201334453401136376870198_025", + "0120134225012001562572_0234567", + "01203454657617018297a9ba3b_013", + "0123244567583680212321_0124568", + "012334522365527687657623_03457", + "0123345653271502277890_0234678", + "0123415260017451417485_0123467", + "01234353436062077879a4_034569a", + "0123456237233762468462_0123467", + "0123124553678996636723_01234579", + "0123140150521627012816_01234568", + "012334056207030186239a_0124568a", + "0123402562781597257515_01234689", + "012345563728236860941a_01234689", + "0123456718975a191b31018ca1_6024", + "011223144512141223364748_0134567", + "01200342456457204258454259_13567", + "01200345262026640726200126_12356", + "012134500667780150133734_0124578", + "012304056206578538855771_0134578", + "012320140523678059960167_0134678", + "012334202320353617340117_0123457", + "012334516746415167528552_0123567", + "012334545650768956967696_1345678", + "01234035644067122358014035_02467", + "012340421526014042741815_0134567", + "012340566323435672282363_0125678", + 
"0123425667606891ab4546ca_135789b", + "01234512062357867806019012_01348", + "01234530567862235662453001_12348", + "012345626787849819306a235a_12357", + "01234564378015450115799879_02589", + "01201301422035136756350113_023456", + "01203245607660140160144568_015678", + "012134235162708601519570_01235678", + "012313456273148729237301_01234789", + "012342054036234207403894_01234569", + "0123434135267884415923a0_0123458a", + "01022340252601170178022540_0123456", + "01023452130667863402677995_0124679", + "0112344567016867576780689668_02378", + "01123454460176383449604601_0124569", + "0120233452678926853a52853b_13569ab", + "012314516272578696239a3a_13456789a", + "01232414156470012823377014_0123456", + "0123345667614872902367016156_02589", + "01234003546401172668260103_0123456", + "0123401556367487017440150174_02478", + "01234042252674422340180140_0234567", + "01234231152331234260748660_0123467", + "0123450256014534234572839a_1234689", + "0123454667788001122339239a01_02579", + "01023045461708018930434a0843_134679", + "01023442345067027202788978_12345678", + "011213245012265712135801199a_013568", + "01234052647518899775185223_01345679", + "010213455601783784699a78844584_02478", + "0112345660707809010a9b416936_023578b", + "0120320134204532206307204832_1234568", + "012032452034463407322032344801_12567", + "012034567215638948013456155663_12469", + "0123044225044216011704804225_0134578", + "012312455067017445506774866798_01356", + "012314563001473014234775825647_12467", + "01233415467421897ab70190c923_023469a", + "0123345671891ab323c0250b2b28_1234679", + "012342543065761823650197425465_13678", + "0123450445673879922338566779_0124568", + "0123451460278679a89aa886792723_01349", + "0123453652170501840923364529_0235689", + "012345627438873823621662380190_03568", + "012345637879a16b56199c1951d2_13468ab", + "0120130450011336738493737a73_01346789", + "0120230401350620578223063557_01345678", + "01203456728497a44960b8842072b8_01458b", + "0121324567785001217178842921_01235789", + "0123042560766082680960680482_01345689", + "0123122415121567561501682367_01234567", + "0123145676864850977235233556_01234578", + "0123301401566714789549140130_01234589", + "0123302415236223300178155484_01234567", + "01233452642307183468699a4bcb7d_012456", + "01234025670828297a7ba1ca7a25_125689ac", + "012341153673078449a2b9c29dd6_013469ab", + "0123453056010734304523340758_01245678", + "0123455676856009605652859a23_1346789a", + "01234564787935409a2312016478_01235678", + "01123045674683679879016746453083_01256", + "0112312456472478851201240978566a_02467", + "011234304501633001341247486301_1245678", + "01234015623423014015623401155715_12456", + "01234254657837807823377880018078_12347", + "012342563523675635786719569442_0124578", + "0123425678955a76abca293d6e42_1234579ce", + "01023045160738943a6b931c5c4530_012479ac", + "011213044556047004455845040119_01234679", + "0112343536786097018a2997789739_01235679", + "012003435346036784466718017953_01245789", + "0123245627389a8938546056a775a7_02345789", + "012331040151064723859854859869_01234589", + "0123402506472840499a5449259b0154_13678b", + "012342415001616761416136238495_01234567", + "010223456478457659a559b6bcd5_0234579abcd", + "01231224500106501223012467066801_0134567", + "0123405221014061748640983aab05cb_0123459", + "01234053366137389aabcd9506eada_18a256b4c", + "012341122541126758655867072339_123456789", + "01234526407847790a45644047780b0a_135789a", + "012345501601377482453723743798829a_12356", + "0112304564450157734530016468645745_123567", + "0120345653571787985a170156491798_0134568a", + 
"0123245663237428247497281474148a_02345678", + "01233001424567702682269aa6264845_1234568a", + "012345607458360123179a4501745860_0124678a", + "011223144512606714682912140129601a_0145689", + "0123045667829539569523398218019582_1234567", + "0123145046013550143523011446674678_0123456", + "012334234562342357344578578978899089_02356", + "0123345627231225085901125a5627127b_02678ab", + "01234042565728744257239a0b49405c9a_01369ab", + "01234567891aab8bc9d88296895d822331_035789b", + "0102011301456772524502787252789001_01234678", + "0120314351060175205889317543946758_01245789", + "0121034251677521510142800151755101_01345678", + "01230456789a3bc81cd016919a090227_012358abcd", + "0123145213235226789313011323938926_02356789", + "01234035678930ab2328bcda73abedfa91_12468abc", + "0123452034234506722006233448342372_01234567", + "0102034524564502240748028948038a2402_134569a", + "011230244501362412300157788924459a5724_02346", + "01123450634701623463126247857850011247_03568", + "01203442566715200134839820423483989a98_1367a", + "01230435061201277804122923277a3b2312_013456b", + "012304500216754849507a0175044b047c50_123469a", + "01231450678578390150856778966778855001_12456", + "012334526113700187708798a0261334876a_0123468", + "01234561674678126792616a12bacb92dc51_013589c", + "01234564571886186401927945574564799223_03578", + "01234564761280586401458058456401122376_13467", + "011201234054367840011285693623120140_01234567", + "01201324563501078079130107202401355635_024679", + "0123451678097414a221b620a215161cb64d_025679ab", + "01234546378582601882233945a523424623_12345679", + "0123456786167349a535a573677335165b01cb_03567a", + "0112034536475003455038010301493a45124b_123568b", + "0112234567408912011845400112162a121623_123579a", + "0112344034560140678334407983019883796756_12345", + "01202345160114172028496a16147b20011716_125678a", + "0120345674789545747a93a1bc7ab73b7a_012356789ab", + "0123145001267385509a68abb78568ab9a26c9_1234579", + "0123421450426501502365017687144223763923_03567", + "01234562768489a1569abcde0deb013cdebc0d_02345ce", + "0123456416378270012337167064452382235937_12578", + "0123456507861298932398a386bacb5c98455c_0123469", + "0123456789abc6daed4c5081fef44cgd5h6h6i_13579bg", + "01123041560789786a608b044c04d807308e_6701d982a3", + "0112344567589a1bc93cc99d6071ce0a7ec960_01347abe", + "012304205246789724a21bc14692d18ae87d_1235679abd", + "01231245600156601278893778018923122301_03456789", + "01234354436164541789682689a561645464baa5_025789", + "01234560170158344572237234174558895845_01234567", + "012345637858196a359b3cd149ef4e4919_0234568abcde", + "010213456178797a3a68bc67d33c9e4da99f9e_01358acde", + "01120334500675783950750150066a757b030175_0123689", + "012032042563572820898a0432252832636b6332_124567a", + "012304567338179863ab6cd6e001fd7aag4617_1ce7530a9", + "012334567517400140756856342317751756014001_12678", + "012345366758147936822336674579823658452382_02479", + "012345466378920ab448c5866345d4b92302_023456789ad", + "01234546716001829660a623b039237bc2dcaee8_1246789", + "0102342564071489ab9c126a2d83e0fb64gf91_0234578adg", + "0112132412501667245839011613015a3b5c50_01345679ab", + "0112233456452312700156701223344568566845_01234678", + "0120324354678695185443320120320143865418_01234579", + "0123145220232001362023372018011820233920_01345689", + "0123453617845245846901905223693669235290_03456789", + "012345673889abb88c890889878d7445085e_012345689abe", + "0120130142200546174220485905010a46ab055c05_134567a", + "0121345165637897abb8cdefc376b7abgc5e855h85_1248acf", + "01230430566738098a26673bbc38232d8eb00e_0123678acde", + 
"01233454600347899a7a47b8c2de8515bd8589_12345789abd", + "012345266427400826644045095a452b2664264ccd_013478b", + "012345340645175872231701177223453423457217_0245678", + "01234560789ab2c79bd2e9fd5ghii5gdjf5k0ed8l6_13489bh", + "0102134567648298262a8a264b3c2d64124eafg8_0125679acd", + "0102324305260278327978ab744c9d8ae0c18f_012345689bcd", + "01023456725328398a483bc41c924d7214e1f1_012345789cdf", + "01123004056761873039301ab01c711a7de7fc_012345678abd", + "01203445413678977a2b0c010cd2edc8bcc8d2_02345678abcd", + "0121345678973a91bcc77d62e4bf5g9740ef1h09_123458bdfg", + "012340015465721701893889237217014054011723_01356789", + "012345167289a52bc342d712e1fd2b95455ge814_13469abcde", + "0123456782745045199819011950019819829823458219_0346", + "012345678309263a231a7b1cdb1e7f31ageff5a7_023569acef", + "0120130456787696a0010b5008cbdaa00b9ea0a9_0123579abde", + "012334566735564829a91ab6cb35d2342935a9bdd229_024679a", + "012345423567716713712086677109a8421335866713_034568a", + "0123456780521639018098233980988001166716012380_02478", + "0120134501678456458967847256672045566772200113_134789", + "012113456789a4a878b71c212ddea48fd745cg1c21_g1f26b8c95", + "012334456241789745a474a42b5cda235e355f3523_59b3ae40d8", + "0123453674184118741930a4baa00cda01a1190e_01346789abcd", + "012345624789949ab3c0d394d4474e9416b36bf994_2c867e09b4", + "01234562787209ab0b62bcdae00b8a78f4700951_012356789abc", + "0123456786709ab4c30dcea360fcf40gceeha0a3ce_1247abcdfg", + "0120345326017668929a0b2620344cd29234ca266826_18a29345b", + "01234156783996ab39798ab3cdbd78cdab8a1c01785778_02358bc", + "012345067889abc0d4e7d3b68f37g2c27hhi17j2jkkllm_035689a", + "01234560170108890260464a4b60088c08604660088d60_13567bd", + "0102301452467889a1144b6c4de8feghi4jgkijlflaffl_01279adh", + "010234567897ab2c7d4b477e14b6474f29ab14ga7eah_06b83ag1ed", + "0112034506473803690150033a0306011b1c454d12_01245689abcd", + "0121134056789a1bc16d9ce91fgh2h69890201560i01_123579befg", + "012334353627613890024a27b42338bc4dec90c49e_012456789bce", + "01234250060758599a582078b6c3d40bde3eef3ec3_012468abcdef", + "0123456706801901041928452a80284b4c1d012e4b_12345689abde", + "0123456778953ab5a9345c0d8e8bb5f8b00gb08b4b4c_e5f7dc0892", + "0123304223354657420830354923420a46302308424b3c42_134689a", + "0123452445500645272445505806890a5b500a585b8c_012345678ab", + "0112134526731226849a695bc7355b1326121326d3ec_01346789abce", + "011234504673654634466550872901122901506546347334_01234679", + "012032435416755443012032430168168901203243544332_01234678", + "012314456738529001692367144552389014450114014590_01346789", + "01232452367893a08bc013125a3deaa05a3f9398g89893_0134568bcf", + "01233452054652235278013495466a52234634466aa76a4652_123589", + "0123401556376801741501403701562315013792569a7440_1245678a", + "012340156436642301157240236436237289977289233664_01234589", + "012341561778099a979a788b7897a39aac977867cda3_012356789abd", + "0123425467892a42466b462a0554460c2d48422d050e_01234569abde", + "0123435456600789392ab40c252d5408ec43566f5625_0123456789bd", + "01234543657880595a9b097cd0e70959f101459g099h01_124579abcf", + "012345675638596a5b3c86de6738be13fg86383dg5f36h_cge749b021", + "01203425167869a19bc3de9fgehc1hif7j1j01cijk0dc334de_14689bd", + "012034355678795ab56cdbd008e6fbbgha5ab5db83_0123456789bcdgh", + "0123451261728779127a23a4b8c4726d7aa4967996d060_01356789acd", + "0102234567829ab8c83de4835a5b5a682f4f3g23022fh04f_014689bdeg", + "01122340546512764001654012546576402301382389120140_01234567", + "01123045672653014548123045530145306701122667799a67_0124578a", + 
"0112341536273829122a30151b015c15381bbdbe1b121b_012356789bde", + "0123405647126201804094a947942b470c7d2ef247fg94gh_9e43a50718", + "01234536752889a8b2280302454cd2ed453f5db35bg53f45_b143a6fc05", + "01234542632372483123879887a97287b29887a97287caa99887_013456b", + "0112231240356778011240847867593523124001122335594084_01234569", + "012324566735705623891aa801671a7001677056675635562370_1234589a", + "012340567839a267b67c6797bd567efgb2059g7c699g2h6db6_02458abcdf", + "0123455067458612919a6b0158cd865e5886c56fg21216126b_bad6g14e37", + "0123456786749067a3b886bc8defdghagi59iejhjkgiglljjkk0jk_12357f", + "0123240546375208232905529a29244b9c290d3746050d5229_01346789acd", + "0123456507689abc8dcea7a3df9g0hhijkel6mmncjloe4jepjp0_134578abk", + "011234567001356770671228125667958a0170122812670156677067_014579", + "012314524670011438706946877001237052873887700114017087_01234578", + "012342302301154201678930964a01891567581501422389964289_0234789a", + "0112341356704802019136344ab44cdeb4fddbb191926g36_de0c217f9b643a8", + "011203456786739001a1403b01c573676de603d93f73g63h0301e6_123568acde", + "01234015640140521501645223375240892315526415a6238a896401_03456789", + "0123425674173623809a01aba7a242c217d3427417e1abbeab23_0123456789be", + "01234556040768455604524569045a2b52ac01560d52682e525a_12356789abcd", + "012345646745869883abb998ab0c862370676445700c5d456486_012345678abc", + "0120320420153267011806393a206b3220066b3c6d322006670620_0234678abcd", + "01234256678798a0800bca2ded3e3cf87ddga07087ca0h7da0_123456789abcdef", + "012345367890ab6c7de22fd1291g5dghfij2aedkjl29imlne2no900ggp_1248bej", + "0123456764524528695a6445ab5a522c5a5052456d6423456d5001_12345678abcd", + "0112341513267801797001155a782b1501703c7d011512133413122b_" + "0124568abcd", + "01234205546705082942544a05ab540c46054a6746ab54056d460c4a_" + "0134789abcd", + "01122345467089943a612b3c231d12e11d3fg380hb9b233chb2370_" + "01234578bcdfgh", + "012013453652789a1bc93a930bad366e6f208g167h6ih67hj178_" + "012456789abcdfgh", + "012034561345560120341345344572560120011372346845564568893468_" + "01234678", + "0123425001678603941a18b4ab1a12bc2342d686b4ab1858d51aed58_" + "12345679abcd", + "011223145678984a456b7cd1ae569a0fag799a01ah791i2i5a016b_" + "012346789abcefi", + "0123456172389aa33b03237275c101d99a3ba33efgdfh4hgida3a0_" + "012346789bcdfgh", + "01234567829aa66b369ac99d94e92f94dghc455i3i26d323cd2f67_" + "d74f931ha25ibcg", + "011203452601070824692a12ab2a266c244d012412016e262469ab4d08_" + "012345689bde", + "011234560748941ab0c54dd11e944d016d56c5b0b3cfg3b3chiccf_" + "012345679abcdefi", + "01201301133452204613011334204613527552012001135234754668894634_" + "02345679", + "0121234565761821901ab3c0cdef1gh2ei21c6efhjcd1gh25f90hj_" + "01235679abdefghj", + "012304356217869823793562865a9835236286017917627998233501046286_" + "0234569a", + "012314560701143898a11bcd5e2cf1231g2chc2ciaa4426362a43j_" + "73b248ac15i9gdeh", + "01234506476447420158790a0647640b45422c237d4764060a0664477d_" + "123456789abd", + "01234567786736297a64234b36672c64232d607e67364b23647a360160_" + "1345678abcde", + "01234352236107527890a61bc4610107569b52612da6dc90528507788552_" + "0134789abcd", + "011234015640077819053a7b01193440341c011c3d3a050740343e344007_" + "01234679abcd", + "0120130134451367782082782001672013823478132001208213786796677820_" + "01234569", + "0120134256570148054220590aab424c204220050a20ad4e42200aab5705_" + "01245789abcd", + "012334524623788097159a01781534529715800178809715522334469a789778_" + "0134679a", + "01234056607856981ab11ab7b1a32c7de91ab1984084234098b77dde7db7_" + "01234589abde", + 
"01234225424617489ab4c4b4abb22db2aba19ae9179fg7677hg7fie9_" + "8gh45093eic627ab", + "01234516027835975ab9359cc4d401ce2cce23f25g270292hd7845f2c47cc4_" + "a816fgbe25", + "0123453621178983ab3c2321d19e59dbfa3g0123h2aijdihd121d1jd_" + "012345789abcdgij", + "010234526040178991a3344b343889910160c696d6efe8890260026g1h_" + "cg9467a83e02f5d", + "0123021456617848392314a0ba0256c5239ca056dbba61a0db023e2302a03e_" + "012345679bde", + "012314563572802356019a49b77214cbb780d88535499a4943354785b7aecb_" + "01245678abcd", + "0123243567869a9634ab7cd7676a8618144eaf4ggaah6a3d67d7676aah_" + "02456789abcdefgh", + "0123431415566789369a910bc0dc822e153f0b23b26g6h56fijf0jfi6h_" + "01345678bcdefghi", + "012345603718297a01049b1c3d183ef3743e1g41e4hd1g3e23ifh32329_" + "db86h09271ia5gfe", + "01233143536517898a31172823ab2896438a04c54c6528c2964c04d0044cc228_" + "012345678ab", + "012034566718799a199bcde1feg1e8c5566779h6fe739b9i79df9id37317_" + "123456789abcefgi", + "012034567849175ab39c01d6563e34f349340a50499gb33h6hd634499i34_" + "023456789abcdefh", + "012345400672899a3bc71c23d6ad89d66538d63e897223727dd64501403b3e01_" + "0134569abcde", + "01203204015126477804193ab3b23a266c9d26aef520044g5154a5h151f5h1_" + "60fha95b3478g12", + "01230456170189a07b173c7d2aaef8gfc7e5ae2agh7bbi23dj3cck6h6l7bkmmn0117" + "7d_023469a", + "0112134035011627403801494001124a49014b4001132c124b166d1638122e1216_" + "012346789abd", + "011231345657363456806940343669349a4bc5dcc956366931018001c93136be56_" + "03456789abcd", + "01123404526786986ab0049c86826d2e98121412fc262eeghf8698iehi6df9_" + "0123456789cdefgh", + "01203453367585099a6ab6ac504d64aeaf75g04d9ab6hbhi098bgj9aghhiaf_" + "01345679abcdfgij", + "01233445673724789a9bc3ada22437e0c34513fcbfc44g34baeh13ad4ic44i_" + "01345679abcdefgi", + "012340356476871976647640582a8701352335584064872a76644087580119352335" + "01_1245789a", + "01234223255637161889abc2d1e1f21g188h8i012ee8f06a2ec2jci9c3358i_" + "c368hgi5a7f4e910", + "012342256276862972a776b008ac8debfe6gb442eah7aceb62ib2362b447h7_" + "iae8f64g71c309bh", + "011234564770286334015647126370479a9501703456633447563495706356019a95" + "5663_1245789a", + "012334455678934a12255bc0d4e3239f09c0c18145gdd4gd5h12ia25dj343f4a12_" + "j5cebg9f4d73201i", + "012334564728696abc3d133aa413acec86a4f8234g86344h6cbc5i23j35j0b475i_" + "012346789abcegij", + "0123451670017870120179702a01147b166c4d7012144e1401122a707b1612231216" + "6c_01345678abcd", + "01234564786790147ab1c6d76764e6bf3bbff515g3031h147i676j3d67d73d7ig3_" + "0a1fgi4d928b6ce3", + "012023013415678549a0b6c2230120d034a07a67a015ed0115a07a6723c2dcc223ed" + "8eed_013456789abc", + "012324506517890a9b9c19d13de79fg823175381h09cg801814ih024j2244i8i81d1" + "_2bj39cdeh81ga4i7", + "012345567870922a233bcdd2e2cd438c15f9g114g13h242acd2392233h3id3cd8c3i" + "_897hi15bgce304da", + "01234252657253879620653a9673533b87c5defdde53d86g9hc53i96ehj001j0j996" + "6g_1346789abcdefghj", + "01234256371778233617917ab22cb2de9fe9gd3723h2de7abighhb7j23i1911701e9" + "179f_a3b645die79g8fj0", + "01234354306785295247ab0c43b4d8302943e90c4f0g3090a40g8hhi8hd8da9jija4" + "434f_c5072g3j9fhb1de4", + "01234035640740189a0b07bcd7d57bbe64f4gf7124h1bi1bfhh1fhbigf4h8j18241b" + "188jf9_4j30hg9fa72i6d85", + "012023045016011678793a3b9c9250cdd6e6ef8gh9hci792e103231392793j92233b" + "139289i7e1_06h9725b3e1fgdic", + "0123415212644789a8840b47c72de8f0851712235250g0f0afhe7i17eahe7iaffbgb" + "122j12fbaf_064hbf391ieg75dc", + "012345645789a6ba15c6a01d5ea6355f396g139h1dg80da0i13935a68939c615i1id" + "43jca00da0_2ce76fd53jbi9ga0", + 
"0123145678491a154b14bc4b201517144dde566f15142g5615fhbi56ijbk4b6f4d14" + "56150120011723_0134689abcdfhk", + "0123424567895ab4bc4d0e8fbge24542fhc8gibce25j5k8945c85alb0l0e8ffmb445" + "5645bgb4422n42_0134569adegijlm", + "0123456728197ab26c67de20266f7g1d64hi017jdehkhb208lb2hbhk1d01hm20hbdn" + "671d7o4p64677a_13579abceghkmop", + "01234567893aab5864456c89de8064585f5g52233a8h58528d6723588ddi8d0j3a58" + "803kal585258800j_0134579abefhjk", + "01230224561778392a23bc7de0df354g022301173524bh4i233j23023k7dblbebl24" + "e0014m02be177ddn7d_1346789cdegjkl", + "01234567860469ab525c230d5e5f5c2a1a3g0186c80h8i3j232a5c3k45862l5fab04" + "451a5223013jfmjnao_0134579bcdfghj", + "0123452647892a232bc87ddecf7d475a3gdhd2aidjakjc233l4md245232a5a4mdjd2" + "aneoc8308p3q2adh5a_0134589abefhjkm", + "0123245678492a0b0c1a0152de566f56b54ggh56ai1aaifj4g0k6f0156fd240b2l52" + "6f566f522l5752fd244m24_0135689abcegikl", + "0123453067189abc5d144eec0ffg9hi2jkg53lm1lnh35eo601kbnpqn677brm1k0fs0" + "tq2tbc3lcu7bk7ngvnln_012357abdefghjopst", + "0123242546405768690a2b7c259dbef2245746cg6h407cijif46cg256h245725f246" + "577k4l24014m2b4l2469469d_134679abcdeghjl", + "0102345627687318198a1801029b1c1d5e270fgh0227345i19019j8k7l199g8m2702" + "8n19i001fo19gh9g271di05i5e_02345789acdehlm", + "012345161762781917ab8ccdce2f9g78168c6417ah627864ci8c6ja0160178ak19h4" + "4l01a0012m622n1716ab456478_123569bcdefgijl", + "012345565067620189abbcde3f1ghij88kfhkc56cil5mf3aenfhnoompdab56n33aen" + "j77aj7j80j89qon3rns9tss9_1234679abcdeghikos", + "012345676268492abc6267ad2e627fgh4i9aj62kel60eb4j012e621mj61g6001bc60" + "eb672een7f1g4i4jeo2a452ej6gp_01357acefhijklo", + "012314056278925abc5d6efghiajcakl62388ham38j3013nopqcc94r9314q5pk05ro" + "klopsntqtbnu01hlg0bvsnuwfg9j_1234678bdefiklpqrs", + "0123456137505223893abc0d50010bef1g848h5e505eef5e0b0d457i238486bj0152" + "235k4550523723523750010b1l010b7m_013479acdfghij", + "01234562783923a0b76a949c85dce67839fgdh9ci7j3cklj6b014m9nof5pejoqrnsj" + "n53nj33n5pst78ton8of7u0ii7n4_12345678acdfgijkpu", + "0123456572589abc3def14ghij5bdkb3lmmfgn4o3d8pimb801b35bn8j3ijqignon3c" + "ako55bkrcsmtj3ij3c23u2j3imqivq_034569acdefhikmqst", + "0123456782298a3b5c829d2e4fg423453h8g2979g49i5j453k5j806l82238gg42982" + "8g9dm86m6lm82e82233k23804f2n29ko9p_123469cdefijkln", + "01234546735809abcdb59efbgchb2ijiklm2n7op2ih07qqd6rm77q23qdoe7323fh09" + "a4fa8qqka4fhksthqke27q1746238q8c_0234689bcdehijlpqr", + "012345677813953ab46bc9cddefegfh35hijd567jklm5nkoe2b4d5m6defegf45fpe2" + "2hqnhrnh2hile2feqslmfpijiltpjk67_0135789cdefghimnpq", + "01234553672890abc8d5364959d59ed21fghij7kl9mnno876790b8p70g6g7qen0g90" + "ab0gj4iag187c84l1fj47k67p76grplssm_0123478acdfhiklmp", + "011234567089a9b3cde3fbg3fhiaae8jklmnopoqa6rs7mtu4vw7nxwkuy5tjsz9p3dn" + "A57Be4w7zC5tDiDoEf7mB8mF7mlE_01234689abcdefghilmpsuwy", + "011213454056077845914007a440014bbc4defgh6ie9ijef56eaeka440077l914507" + "564007017m07011n7g0740456iio56456i4007_01346789bdfhilm", + "012023141567891a01bc20d6a81a2eabbfag8h6i5d01j8d61k2j016lbma820j8ab01" + "23155d1aa8j814dn155dao1abm15abbccpkq1a_1234579aceijkmn", + "012345611789a8b13cdea6fg40hdij01kflmnl0oen3pqairs9f5nia6itubnq61kucq" + "pu68qiu4013p1v3ccqdeeqhdqj9weqv90xjs_0125679cdghjkmopsx", + "012345678549abc4d9ec3fghdi6j499kk7lmd9nlgcodpj7m5730qnr66j8riqrs3etp" + "ueknvi45e8ug8rr55krsueiq56e86jbswbwx_1235789bcdhjkmopqs", + "0123456789abcde6f6gcch57icjhbk1aalm05n1f0f21659of61fefp6bil9lbqb7rrc" + "bi1a679sp6epef1fef23kg9kp4epeffq3tt88u_03478bdefhijklmnop", + "012034526728594a5b20935c59346d52758e4f28eg4h8i9359522j3k34758l677552" + 
"935975282m8e6n6oo0280p8e6n525b34bq93344a34_1468abcdeghijln", + "012134305678349ab3cb4decf8gh6ic5jk3leg8k09b3ec5mc5na26b0opq7krse4j78" + "mi8kebb209gbkrgh5mgbrtmupqhvkr6w26b2gbog_12468abcefhiknpqrw", + "01234564678669400ab89c8664407d67debfeg0h64427d67b8de647d67402i64dj7d" + "bkb8bl67863m42b8en644o6964867dbkde0p40427d402q_12356789achiklp", + "012345655789abcdefg8h5hi1j6hk11chi511jlmjnoap6ekalgh2q231rst85qugvg8" + "53wsfxw4oyk15zmA45mtsxsBls3Cw4tD434E43_1345689adehijkmoqrtvyAC", + "01234567689ab6c9dc1bef2ghijkl73men67opq7qrps58st5fgl0j7s7ubv1bwxnuqc" + "58l7jkko4njkqx46h4ylwxj2zwxul9qc67464e_012345678abcdfghjloquwz", + "0123456017680439824a012368826004bc0d4e04391f4g683hbi0123822368041j3k" + "4e6023l6011mc16nbl826ol6opblbc6obq688260043h4a04_123579abeghikmn", + "0123456789a27b4cdef14dg33h93ijjck93l93k0manoo4hpl4h13lqdg390ag2l89c5" + "hqld5ern0fc5mr0h2l7nsm4clqtlnt2tlq237n3hh17b23_124789abcefgiklmno", + "01234567089ab4cd2e1f8ghi0hjklbgmno1662pqrsnt6nfuhjsdsvvwxym9ez868Aip" + "pBtqipCab4ya45D5q5yErv622tEct4nbFGxyhxqFGw_0125789cdeghjloqrvwxzAEG", + "01234567897a3b2c1bd623ae2fg467h3ij7aklmn0odp3b78jqenrserteer23ruv7ar" + "w7a9idb49bkhxmm9pyu5jw737hw7zAqBjqzhhCklijiv_" + "02356789abcdegjlnoqstwAB", + "0123456789ab355cde6001f4gf86hg72ijb51g9i2k89jkdlmbij6mk3no9ikpidjp35" + "3qm22kmab3r6afmab44ois6m2bs2b4b3notnutism2no234om2_" + "123578bcdiklmopqrs", + "0123456272895a181bcd75efag0e9h8aij01kbk6lmnidoc81bp00lbqrs8tuqcd1c18" + "5defvtwxeccdmyb2iknwbzemAnBe72emxbkrCkb2ldnCCb_" + "01346789bcdfjkmopqstwyA", + "01234567689a6b5cb26d2e2fe46b2e2fb24g455he4f9fi2j2e452ff0e42e239k890l" + "f9f09m3n0ofpf0oq23fpf96d012ff90of0f99rf9f09m0l_" + "123456789abcdfghijlmopqr", + "0123456789abcd29efgaheiej48ikll5bmabnoappqkrstkl9o97u88ivnwx3m1yqznA" + "Ba26aAlaCp42uc0udArCnoieptqxyd26bw9bi9wDEi9b14ab_" + "035678acdefghknpqrsuvxz", + "012345602776581901455a609b6419011ccd4e9f4ggh6i6019ij6i016064ijgk6l45" + "4g1cm5gn2o4g270145641c5p45764q604576gr4g272m2o5p64_" + "01346789abcdeghijlmnopr", + "0123456278590a1bcd5a2e01e1f3gh3e0ai55a6jkilm3cmne0m4230gj44iioj4p08f" + "fqjpqdljqri58f2pstghespukvu5kipu23fq5982e0es2p8fpuu582pu_" + "135679abdfhklqrtu", + "0123045267189a8718bc59ad8e2fghi6j59kglilm8f6no9m8pm859q0bjampo0rsn9k" + "k0tq9kjuepuv2a4efi9aj2vwafctxybcqxtq3fctc552v33fc5233ggh_" + "0345679cehjklnprsy", + "0123045678193a8b1a9c1d9e07af783a233a1a01g0ah2i19072j8k011aalam2g78g0" + "019ekn9o04191a4p9504199o011dam2g8k1q07kr78011q8k07_" + "012346789abcdefghijklmoq", + "012345677869a67472120b9cdefgcfc5hcij5k452l1mn1ok94pn5g5kq7pi2kc5ri2s" + "9t5gusojvdwx1dypzp42pryAdByzCu12xCzwn1zp4DroEvpr1d_" + "0345678abcefgjknoprswxzA", + "0123425674789abcdef4ghia4hj6klmkno7469p19qr8d7igsp6tq2suv914sidep1aq" + "q37c9at7pnvmawxeyzz77AAB8C7Ajs1DAEF79a3G7A8AF7AEyFF7_" + "13456789acegijlnpsuvyBCE", + "0123455637458498822a205bc94584cd8e2f235bg00hbi4582bj5b2345g0840kcgg0" + "20823l23cmc982988nc982cglobpnq3l0kcm23823r8nnq82233s82_" + "012345789abcdegijklmnoq", + "01123456789abc1de4fag9hi2jjkclmnelfomeelp70q5r9a45esjtsuv72jqbwxyubc" + "kz95dAlBCpdpn5mnmenhuDbjgnhiesjEFBlBxeGF0w01aH1d2vv995_" + "01246789bcegilmnstuvxyAF", + "01202345566784397a7b6cd0e1dfgh46igjklifmino931p7cqlrrslitunvswaxf1yd" + "ryhodz8l0Av9dfxBtd8CDh8p62pyCj23ydsEd0sdghg2200xd02062_" + "0123456789bfhjlmnrsuyADE", + "012334506789abbcdbedfcgaghbc3b1ij2k301gd9llm0ndopnqk2o7f0kdodb3b9rf4" + "k3st5puk3fuh7fhv7w13axgh0y5uzsuAkBxCobqgukto0kobDq8D3b_" + "0134568abcdghklmnopqtuvA", + "01234567189abcdef2gf243h23d27ihj3hkaj73lm9nkkjhjh6gbbo6pjqh6hj1ron3h" + 
"cn0ce523s9t0de1siuvtpid2wpgxcopio3co1r0r0c3lvgjqrkkqrk_" + "0125789abcdefghijlnoqrstuvx", + "0123345647899abc5dbe8b7f388g7h3447hi5289bj233038309k7ffl7h9aem013034" + "898b47389n5od9895d525o23d98pbe3q34479r89387hhshi3q8bem9r38_" + "12345679abcdefghijkmoqrs", + "0112342563745879346a2bc09d7463799e9f7g340701799f124h2bfi259f0j07127g" + "6c749ec00112076k7425015l12254h5m6c12012507124n747o4p0701c04n1225_" + "012345689abcdefghjklmnp", + "01123045463401276830935a45125b6c5d014ef5341gh993h930hi463j2k013034hl" + "1gcm6n466ccmhf343o12016p3001122qor34455s4t12013045344630016p125s_" + "012356789acdefghijkmnpqs", + "011234560768507956018a9b68cd1e505f078g507h07565c7i685056075079j5683k" + "8g0107glam1n8g56688a567o68500107561268p1508g5ccd3p5c500q3k0r01p1_" + "12346789abdefghijklmnopq", + "0123456789abcdefge8h6ijklggh74mfnop0hqgn0hrhst9jfuqvwxc2rg6seyzllnlw" + "4AB9zl2mhegdefnyCDB18tdms4Ecp8yFfuC4dq1bs88hhGnytk4ns4te4A4tC4s4_" + "13579bcefhknpqrtuvxyzADF", + "012345465745894a7b579c457d57507e8fa86g0h2i2j8f2kk446hl45575mi74afn45" + "506g89a84a0h8f457ok42k57500p2j2kk4a8454q4afn010r508ffs8f4550890r46_" + "1346789abcdfghijklmnpqr", + "01123456570839013a6b918c7dbef5566beg50bh08573f56f5508i08011j8k0l6bem" + "0108568n0lno1p085q6b56be6ber56500sbeem0150561t6b565001088n08011j08_" + "01245789abcdefhjklmnopqt", + "012304567897abcd4e8fghij6k90l2g26m6lnopmqrdobs0tusjqkv7wmn6mabta766m" + "cdgxyztl04rABrwk6yeu56tCDgDinEiFkn1g8w016y1wwk1l8wbFli8fgx1wijus0D_" + "013589acefhijkmnoqsxzBEF", + "0123456789ab8c9de4cfcg8h1i3bjaklmn0opqjbrfsl604t3iti2tuscfgvhwax9gym" + "z801g0xk8gAe8cvaBt6BcfApabmo67e6dCmDmoAezA9d6089rf0o8ccffEcf60Appq_" + "0123479acdefhjkmoqryzACE", + "0123456467178492abc1defcgh2gijke5lmn4lld7ode9217pkcm904l7q34m7brld7s" + "amjctai9gh5luv922gw592sdhxam5li9ghqlijg3y37ocm0ymo0ctuft01jffttu_" + "01235789abcdefhijklmnopqvwx", + "01123415677819ab9c305d36ef5715347g1h573ihj5d5kjl15785d1hdm01deef7n5d" + "hj1h15hj5d1hlo36jlhjlo6736jllp1h157q30jl577q19dr5d5s9cdt15jahj1h5kdr" + "hj_01245789abcdefhijklmnoqr", + "01203456789abc7defg1hi8jkb0k15klmel3n86o2089563p780kkl3o5i5qh8crhijs" + "ometsenhhjc36mmun72gqmjsgvmtw5g17dn7nh8xh85q8j1wwl8x1w155iyh211y56_" + "02345679abcdefghiklnoqrstuvx", + "01023452672890abbc2de0fghi3fjk9l1mno01pbq352krqp5kcst3b54634uq4cq4vw" + "exydcztvjAdBuh0dCDb0Ek5k6s3bsD6se0fy46524faji046hafF0dhpdBpotaoGpohp" + "i0ae_0123568adeghijnptuwxzABC", + "01232435236738294a232402bc011d4b24e0bc0ff67g024h0f24023ie0jk4h670lf6" + "23me4nmop302230ff6p3me3qe002me2467mr0f2s6j237gf6me0f02240l6j4ne0017k" + "jk3qme_124578abcdefghijklmnopqr", + "012003245206203728099a2b2009bc9dce9f2b2009bc9gghei033j2b0109205203k5" + "clbc9g093mkn52o3ce2b3m2009bc52k52b52k5kpce2bei9gkobcceeq3rce03bc202b" + "2003o33r_123456789abcefghijklmnpr", + "012324567589ab6cca9deffgg9h9ic3j9klmnfo7p5h93qnr56k6snamtd9duag7cavu" + "diiw4vkitdvxnrg95vvyabr289nrizfg9ig97k9dfg75d1vxsf9d9iuaf7e8efp5p4iz" + "p57k_123456789bcdefghimnpqstuvwxz", + "01234516531789abc4bde00fe09gh8fijk9lmldndm0fdopqp2ri899p9l23iks8p245" + "mltets69s8rie0rufrv5bw530flpe0dmgv9gw93x45esrchahshac4abyxv3gpjggppq" + "kv3x_02345679abcdefghijlmnpqrtuxy", + "012345467871896a278bcdeccf57agf4hgi3j4klmh45bnojj6p6aqk8783kragsit57" + "7k013u0a50v5i360pwhgjpoj57jpp6dvgswhsx607k011b50rapwbnwmwrra0a50xnsx" + "klgsag_01346789abdfghijklmnopqrtwx", + "01233040567889ab7345cdef8e78gahijk89el0h24kmbkabnokppqrqhrlhej9ansbk" + "kppqo2tgbu301v89cw78bkgc1x7e01kp30abo720s7catgcwca1xstab01s70ltg73s7" + "sts7lj23_0123456789acefghjklnopqrtwx", + "01122314563745481214905a454bbc7d9e236f123g0156233712141h4b233g14451i" + 
"bj145a5khl7m4b1237bj149n1h2337bo7p4b90bo3714455q01hl905k122337147d4r" + "12144b12sb4b_012346789abcdfghijklmnop", + "011232456574189abcdeef45ghi0hjck8lmcno270201183pmdqii3o374deah6ep6om" + "erqnns7t6eurqos9vuwuawefx6gy2x9gs9eznskesanoawwkke322732gyo3ezhwobmc" + "cwmc6eer_01234578abefhijlmnpqrtuvwxyz", + "01232024560607280669066a7b7cdebf20eg4d06dh077i244d6a7bj5208k280620l2" + "8k6m56jl6nj506de6n20eo24204djp24jll224jljq07jl4dl224de4d24l206jlerde" + "7s5607202820_01256789abcdefgiklmnopqs", + "012013340135130617787901a2ab3c6d0e17dfag0h0613796d7iej3k0106dl6dm3ln" + "20amdl17a2011o13lp066dm3am7q177idla213177qlr0106133c6d130106dlis7i01" + "177i016dis0e0h_01345679acdefghiklmnpqrs", + "012342454223067839ab5c9d50233942ce5ccf5g9hei4523425g235j0k4al4454m5c" + "3945cn239hcfce7l5cab50co425jp0233q235cl4422350qr3q5cceeip07p7s0t7p50" + "454a234245500t_013589abcdefghijkmnopqrt", + "0123456758971a1bcdef6ghif667jhd6cdkelkeflm7nmoh23f67pbdq3di36rsqt3us" + "avke2tiem0k0kewx2i42xik1ba4536dqi32iavwk3dpbdqopiei3td6gdgtdt3k1pb5t" + "y2588u5te11b_012356789abceghijklmoqrstuvw", + "0123245674189abc40de7fe901fgdc1h459i8jkl0m31md23gn0mlo40bp18014056dc" + "q2klrpskdt6beubp45v9tk746btu0v562445wi403xjw30wi0vyjxzg6ve5d6dg61vx1" + "de1vv99u9iwijwwi_12346789bcdfghijklmnoprstuvw", + "0123456782894abcdef4gfh07aai74jaklmnabjopqrgstnd0duovu7jd7qj1wnllgxt" + "uondpqpyoigswprzmnnlpv1wlg0nnd1dstzxicoiuo1wgs7a01d6b3gf6fg8d6wpe71w" + "1ee7def8eq7aabde_012346789acdefhjklmoprstuvxy", + "010213455601047038971a1bc39d3e139c9d1b3e456fbg1h04561b019ibggjc30k6f" + "bgbl6m13451b1h013n13013obp041bkq56c3450170971bbp6r56049ibs45011b016r" + "700445705t97tm9cbs974570_12356789abcdefghiklmpqrs", + "01023224567892abcd2ef3dabghaa0eij3021klkgl01d9im90ngop2eqjb0j9r8sbnt" + "7ukp902o0o322evmb0snf3785fqjwij9f33u7u93ntqjxd903uuyq5imywpv93vm566r" + "56q58ya9bgabdaa993bgqj3u_0123458acdeghijklmnopqrstvy", + "012345343667789a9b5c45345d369267ce45cfdf5d34366g7h2345gi347j3645gk6g" + "929a9223jlgm363067347j6n6g5cce456ogp6ggkgpcq6obg676g6n7rjl675c343623" + "gp929s92236g673t7hbg45343t45_1235678abcdefghjklnopqrt", + "01232456789abc6c3de2dfegghijibkl43l3elegg55m6n9opa4q070rrisnldl75t7i" + "qsucncvnwxyj1wk0zA6bfBlCij5CCbBD248EbBEAF9eltF9Gnae2ek7ibcdHHfIancyF" + "bJ2401zxyzyFtFkK_0123456789abcdefghijkmnpqrstuvwxzBFGHJ", + "012342526738294a9b5cdef9agh2ijeklmno521phqprpffics1tuvnwlnun2lc36c6p" + "1p12xhx1ywcslmh5zlh2Ay2l39ky42eAmyBC9jtDnw123up32942pBEd1fpf3utFlmCv" + "voFmlu9ntFnGuvluoGln_0123456789abcdefijkmorstvwxyzABCDFG", + "011230044567082901041a1201ab2cad2edf121g2c455h631a04450412hij25hhk12" + "014504455lam1aad1g30456304451a01308n5h6o636730450p1aaq1a08hi0112hr04" + "2s455h8n12011a4504011t455h010phr_012345789acdefghiklnopqr", + "0120345676879abcdef9dghdgihjekkl2m194nao1pfjfqgrf9fs192t0b011duduv5p" + "6awxy7yzzs9sqsakaAeBCb8676kDqEbFpF0uakGHaAHtxonoCIqspgGJdegrHy9A209e" + "yK79Cbbpthy7dgGHeB_0123456789acdefghjklmnopqrsuvxyzABCFHK", + "012342567869abcdefghi7jklmnopqdrsjjkit9m1kotk8787ppuvh9wxyzAxpk885B5" + "vB1vA0C6DExyyF3GcHuDp3cd7pIJuKLcJe69j7Me23HN9wIE0cIJOPEmLbb442GMJw6l" + "23b4QOLb7pPELccdLcEeb4JeOP_036789bdeghikloqrsuwyzBDEGIJOQ", + "01203453673895abcdde8ef7g2hdijak38h6l321jmnflok08dm016hdb31fpmhqpgrp" + "21ab6qqsgt06l338akbc5bbcchjaabtu3vm0b3abjaachq0h8diagmdq20mkacwxgtkc" + "mk8dc8pmuyt2m084066w060huzpg2zm2_0124567acdefghiklmopqrstuvwy", + "0123245672289a8bcddefg0hi41j4kjlmin53o0124akcpd7nqrs7224q0dt6d0148i4" + "8u235vq556v2vwdt6d281jxikyikzo5h8uxitzxww2tsdt6d01f5ts0h6vhv5hgc018b" + "23fgxmgc3onf23m9w2xmxw1x1jw2231x_01345789bcdghjklmnopqrstuwxz", + 
"0123415617258968a1b4bacd017ef606406ghijk3f06ha4almm4m006nb6ggklm6gao" + "opfjqm06rj01a1ba2f177989fj41sp5frt1uu7pvrjou8cnb1u01m550011u7wx76x5f" + "fjf66xnmspis1xioqma1haa11xrjx77wvw_1345789abcefghiklmnopqrstuwx", + "0123345002657890abcd372718e2f102e2egg3h9909c1i4jkg50fl21kmg3n923oddl" + "21f1po5eq4dr9c34177j171iistnn51i5eg321uee221cff11ilii8lsdluecd8vi89c" + "ukcfw9pcpoxwcf9f6u65oy6uw9wnpo9ffiia9f_01345689abdefgijklmnoprstuwx", + "01234253678229abcdef8ghidjc8kclmcd8lc8mfl1jnfom6p0djnqfr4sp4p882t9au" + "vhwn85jvuxdj7x96syz9Aud5BneCzvruDeBE2mnAvEFrgGHABnt94slgzvI25w0Dlmm6" + "lg85gG42HG1FD11FfrJeKBwnL44JL4_" + "0123456789acdefghjklmnoqstuvxyABCDEFGL", + "0123455606173889a5b3cd4efghi0jk1lgm0enn85oe30npqlrstjt1j01404edidun8" + "sdostvworwxr5yzqst6s56Abf0B4065C9DbEb36Cm0g22pwFx55y6G46ohB4ropq2p8H" + "oh38aI8HwEycf0s9bEAbwcawaIjGA2_" + "012345689abcdefghiklmopqrstwxyABCEFGHI", + "012341526789abcde583fabghiibabjhkllm5lnopim3kqlrstuhvoewcsibas2xyvzy" + "s05s5lcd07zeABvC2mkl2xnqixabiA0DbBlmlEhicsh55luflrFu5sykewkdufqGwHm0" + "IA0DCmabykyvABfaynyvvCHxm0wCasCJ_" + "01234578abdefghijkmopqrtuvwxyzABCDEHI", + "0123456789ab9cdedffb6ghgijklimi6naoejpmk9n9iq4rsnrdetq4hgusvfwqx6hhg" + "ijvucjdfdscyzrbgfbAB2ds7BCcaDyypsEdFasq482ca4hsE2dq4GlklyBm6eEEH6hhC" + "ICCJ82eEDAgJABtDFvk7guvuoGgugJGl_" + "0246789acdefghijlmnopqrstuvwxyABDEFGI", + "012345675809a5bc8cdbef5g950hhiji1jkdl67m4n53oppqr88ps9tquv6eiwx4hrl6" + "vy45z55rdtjApqqBrCr8z23DhinEs0e8sa7demaFsas0532vhrr8kGz2ndnk0zz24n8p" + "kGpHfIurr8nk3pur45Jknk1je8K33px4_" + "012345789abcdfghijklmopqrstvxyzBCDEGHI", + "0123456789a3bcd0efghijklm3no9p6qrstu5v8nn7h6whxyzoAqo2cikBuxrCD4EoBd" + "pt6qtsno6jtuFBG9HtIJtuKLiK4AMG7rr0EjNEO5n720enijPMJgqQQuiv7Q7rfH0R20" + "jr9HSuIPITgcU6JV5vvWtuvLHmHtm3HmXm23B2_" + "013456789abdfgilopqstvyzAEGILSVW", + "012345678559a98bc87def3geh6idjklfmnmoeofpfqhge2cr18stouvwxxypyzAfu9j" + "sBCDltDEopk6efFqwpklw11nqGfnl445mE38g9te249Dr14H24AGg95h9utoDE3ga96I" + "IJeuaChK6I23g5GK3aaCCDtH5hqhHqtH63DE_" + "012345789abcdefhjklmnopqrstuwxyzDEGIK", + "0123456718964abcdec5f7g1hi8jikjlgm18n4opmqr29estpgtuhvgmwxdudeevthyc" + "7kmzcsrbA0BCxADf8hE25F31Gf2cvHstzI5tDIA0th1zb4E2318jJH3scss8KGbcK90K" + "C9tuLdMCNnnMOLA3GjkPhiC9QG8jlkflK9lkikjiHPvHuv_" + "01245678acdefhijnorstuvwBDEFP", + "01234542676589a94b4578bccd5eb5fgghbc1ijiejigk37lmaanopqpr12r255aassf" + "ctubvmmafw8fa8as9wanxnyqpi6z4Au66zanvBzCzemDe83ea88E3qbF6muvan8GvBGh" + "nHI6gh6m2342bcmD0sJHub231gsH3qb50s50b5_" + "0123456789abcdefghijlmnpqrsuvwxyzBCDFG", + "01234556789abcdefghi4jk7l9mnopq8nristluvvwj2w9rvw0xyadu5nzABC4sDEFrG" + "HID7rEtJpwt2789KcxKcCuLzvM9aw9L4MNmCOfCo6OPK4jeyKHNAuvvMlPQNpRplxyN1" + "MNvMnuPKOiseuplPCul39a23j2iSSx2Tfsfgad4jL44jjR_" + "134578abcdeghjlmnoruvwxyzAFINQ", + "01234567689a1bcde2faag3fh1ai3a8cc3231dfjk43f0cc3el1mn939oi9oc323e2dp" + "23el7en9qlqr0cen7elsltu73f01hv4w4578x60dqe7eqe676ww0h1016ww0dpfp3f23" + "72y7yuely67ek6677e1m6w4welhvlty6yuuqqruqyuy6x6rz_" + "01235678adefhiklmnopqrtuvwxy", + "01234567879abcdbc5efgh0i46j1kl5m15b45mnop99q5r35232s5tosp90u3vqwxynp" + "no5rczfljc1A4BoChy7ky7bcnondC0Cjj11Ad33v5DEvcFhyvrrk1G45w8EuqwjcpH0i" + "4Bj1cz5D1GgE9bHhwBaBDfmkuIADiAklJiEuiAAD_" + "0123456789bcdefgiklmnopqrstuvxzABDEGI", + "01234567689a7a7bac8de1fefghi436jgkglmninio3opapqmrsqmne0tk8f0uvs6vun" + "wxgkyzfbsbdAyB9aC0ChshCes2D25EbgxFAGeyd9lH9aalIglHjCjs0uuHe0iuey0uqr" + "vDb0bgrJqrD568sbEcIggxfg2qKLzF8fgxxFcKEc_" + "012345678abdefghijklmnopqstvxyABCDFHIK", + "0123456789a9bcdefg92hijd3klm5jno3pqrofsh23tquvwxfyxtfgbzviAezwnzBueC" + 
"AeDvtBEsz6pF4GHuIFwgwxGJ67vikKquLE7M67mnnzzwJk6xxta93phigNkOlaBunzBP" + "c6KBgL92wgzwkKgLa9QpmnMRa40cc6uvLE45MHQSFTSMMqqsMqSM_" + "123569acdeghiklnoprvwyzABDHPT", + "012130456783900a1a1bac7ade37f3eghgi01ae930hj7klf83amgno890km07poqrrs" + "he90lt5lju8fo8lt8ffv21pwxog2de0a5821odo8xoxp58ski0trdekm4oodgydeegde" + "odoix45lxolti0tq0alt5l5f45hjrswhamhjpwpoo8az8fo81apopw_" + "01235689abcefghjlmopqstuvwxy", + "012345678392abcdefghh6ijklmnio23a5hpo8833qrisctu8tv8ion9w7xyzyx2gr6o" + "Am7k1BrC6aArD1EF5Do8d723GxHsgr7IHgy0J4fKLvM69N01y0FfFhmLItkl45luOG3u" + "L9l1cd23MiiPIlklMidQRLfKQScdpvARhppvTAh667KUU55DD1_" + "12345678acdfghjklnopqsuwyzBDEKLN", + "0102344536678791a4b4cbc3defghihjkjk991lmi2lhhiinjnbjkbeobpbqjrrs2t2u" + "ugvkowx9ydkj4694k9eolodlyd15nzvcb44667571591A7lm7BA7C0xC8A38DprEc3C0" + "oFoivcGHqEGrIflopq6IqEqIIf02inDpJ6GHjnyvnffgnf_" + "012345678abcdefghijkmnopqruvwxyzACDEHI", + "0123456789abcde094fghijk5lmlnj45igop7qrsbrb26tsuvw67xywbszqABvuCprDq" + "opE8oF5A5yyGHIfClhgJKkLgcLMfvo1IhL1NBvAhFAnl4F7qkimN3zhi5lI3x25yFOPK" + "Nc1N2snlsuvwQPBeEB014x3zoRBetmx2AhSvy3qKLTUsfgFUvwjhDF_" + "0135689acfhjklmoprstuwxzBCGHJLP", + "012304567018918abcdefghbdijiklbcm5n2cooi0hpnqrstnuvw9n3dxcry79zAsBCz" + "AfD9h8uEFqqG4sHIJHwFKwpAcorLwFzKKxMebc23DppNcqdeFqNOOg8Bh8Ej70IkPQ5L" + "8cPRIFcoCJRr04jSrLFRTKJh70CJi6DpKfjiEjxF4Ui6Fq18OE8BqGFq_" + "02345678bcdeghijlprtuwxyABCGIKQT", + "01231456789a3b1cde3fbghcg9i1ja4k141llmngg90ho0idilp21ch20hhckqfk4fkr" + "sht0kq0hcffqcf6rrj27h2de233bd51c01s2p2u7l4v04f270h8n1luwh2l51lfx5y5m" + "01xgq9t0c327kqmk6rkqq91cez4fw8tvpui1e5spossp1cc3pu7nm6hcc4hc_" + "0123568abcefghijklnopqrstuvw", + "012345567894abcdefg6fhijklmgnopkqjenr94sotlu3v1nwxyzf2AynootxBvzgxa4" + "5Cef0eDzinefEovF9GnHEIjJijiEJmmgG50Ktwf2LEtDMtHGzB56dNg6KrefepOaJmPL" + "IA83Q0NRLOj8RS0KxTFSQPijUe3vPipHvFVvijgxHbpkWI3vfkqffkpHpk_" + "01234567acejklmoqrstuvwxzCEFIJQ", + "0123456789abc4d8e3fgghij4k9lmnn0op8q50rstesunvwq7eqxyoz25AuB7CrDkafg" + "E2FcGzxH5Dgd5AbEd845exIo4kJd8qoKA1mLngmffJ7eMmyrs63H1wNnrDcNNnA1d8C3" + "BeOAn0PCi9rvAQ0hDQQtR7vdrvtq89diQt676BijB2mfSRD6R7h86BB9C3_" + "12345689abdfhjknpqrstuwxzAHINOR", + "0120345647189abc019d7cefgebhijklcmd2ngjo1pqnp5rls0eftuagpvw3rxkygzA9" + "nBebCoklDvBiEDkFGHlI1pxengh5JwKfLeMNrghtiOgigePMfcLPODrlKfasng85wLLe" + "klQtnFR83KlISQKfI720lIcTrxfc2UHoKfLKTuotkl85jhFGijuVsis0ij_" + "012346789cdfghiklqsuvwxyBDFGKLNO", + "012345637896abcdefg4h6ai7jekkc0lm5cnopnq1rstu0p4ekv7fo1wrxb2ya01zgab" + "Au8y78BlC7DAopjrnEFiAC1wEFlmpmGnnEyaBdHIC0JCvHfBCBuzKcGLM6BlNL8OsJmx" + "JvcdxMGnmxvHJCC0lmolb2kBlPHIPQfo0lopfoJvRvRStktKzpKGGLKGtKtk_" + "012345789abcdefghilnpstuvwCEIJ", + "0123145167894a2bc5544dae3fghijklfkfmnopq2hpo0rgsqtu0s40fnjvwqxyitz1i" + "q9A9mj9t7B7vv81CyD4Cfm5yu54wojjE6siF5ssDGEhHoq23tzlGBAq9DqsDsvu1waEz" + "noGE01yi145shyyi01iFFG513fIF5sezmGfmaeez0f01nmml6snmno_" + "0123456789abcdeghijklmnprsuvwxzABCEFG", + "01234516789abcdefge7h150i58fjk6l2km23n0op3qprs78tuvw7xyhzAc4Bn7CDjvq" + "9EAmjkaCszzAF8CDGx50redy2kh1EHIr01sJKELt4LaH5zaCKtEHmpeJMvLtis0NNOHm" + "PBeGtAQjOgRaS0SJTsEuJ8aHm2g3yGfgaCUPAf8VeGCDbIxDOgNOS0WRCDtA_" + "02346789bdgijkmnqruvxyACEHJKNST", + "01234564787229abc3defgh6ijkl3mnopqrd0k0stuv4ovwxyf3ozy4AcBwCDnEisFqA" + "GpC517BjH5Itj6iJejjpeagHGKBolLDtzM9mN245jvihFOowFDIePbBoDQMh89lEKx9R" + "ej0kEBhSklCT78JS89IemwjpUx0kBoP66qnoNORVwCP6VHow9mWJwxJSxTTX_" + "0123456789abeghjkmopqsuxACDEFGLO", + "012134456768592abc7cd7ef0ghijiaj202a4kl4mnonpqm0rp1psad2t26du62atlvp" + "wtt28sx5yoiqtl217a1hszzABs01nrCilDl0E201onB26B1FjF1p7G2xBsx5B2HlIxlD" + 
"21fj1pjFrpFi5JzKnrpq1r1h5F67KhzKxF451h1rgru6DggruEEIuEu6_" + "0123456789abcdfghijkmnoqrstuvyzACDFGI", + "01233456677389ab1cd2ed1afghaij1fbk3823dlcmno0pq2rskstuabhn5qvwpfa6ab" + "d2h501pedx6y0hndezbkAvBAq2pesClDEFGAyGHjgIxJ4v2Ka6grmrCL2BHCpnIM6ysC" + "bN1aa6uFrspfrtFOMPuQQwPvvRoIcgsCIsLwwSgILwIMNiCLcgMPPvfgbNgrfg_" + "013456789abcefgikmnqrstwxyABEGN", + "0102345667894abcdefghijkclla7m2hjcnok4bpnqnkr38st2uhdvwbxsyxzA494aBC" + "fDEd2xFr6fxG67vtDntyFHfIBIJKfg9LEBJCo4mtclwHbceMyxgNk4rk2hzldeFriL01" + "CrrzfglOfI1P01IjOQeMCRSiMytyqhCrKRFHFrTqqh56KUUVFH5WWJJCk8Cr8s_" + "01234579abdfijlmopqtvyACDFGHIKMR", + "0123342567829abcde1fgaeh7ii03jkljmn1oflnpqnddearp667s8blln4tm9egunvw" + "pqndx9yzABC3wDqbomEFFGCHwovwIvA6Jn9KLedfiMDxuwx9DxomGNNajewDvwOApq8u" + "klPC3jQvORBQRSPcc5qkv05duTkJjmOpc5gzbclnzrgzSPmgPCskcUAsuw8u822382_" + "012346789acefhikmnoqrsvyBCEHIK", + "0123456789abcdefghijk8lmnompqrjkslhtg4uv45wx5iyk45mqz1A0qBiC90DEFG2e" + "FwijtqiCd9ghHnIdjkfyht78pn673fB7l33Hqr23cwcJncfyHnJKcwyLl3g4MgyFrNDy" + "78OjrIPNrNmHeDQk6FH62e2a6FFGbQtqmqbBmHhampmHvAReaOqrs2BIRS5OR5ykqB_" + "135689bdeghijkloprsvxyzABCEGLP", + "012345671589a2bcdeef8dghbijklmnopqi9crst0gn0uvw4xy3zqABCt6fDdeAeEbrw" + "F8cGeH7gcrCkDIvJw4irxcqAKspqKx23Ex0LaKgD5IKxMFwH1DJgN0u6aO0g67mdyNGP" + "7ffQ01lqPfA7syKs452saKcdlmu6KsFm23RMmASRKxxc3zq6A7u6Kx3u7gghLhu6LBBC" + "_01345789bcefhikmnprtuvyzBDHILM", + "012345677898abcded6e26f2fgghhiij8jkl3aimno7pqrcr7cstg784h8uhek3vn3ws" + "jxayj0hzABC9zwec3vap69ayd09Dklj0ek6erE69aqoaaqqrFBcrrEEtGtdEbGApwEBH" + "C9ijChhzpwyIwjCJBbhKChecj0bG67gAuggAhzzH7p233aay67Le3acrrGcr23_" + "012346789abcdefghijlopqrstuvyzBCDEFGJL", + "0123345467389ab9cdefg11ahai59jfg9aik4lmn2op0qr7mos1aa8o8tctuhaavtwnx" + "xyz6jxmjcioAB4kCpwyDhEFBjva8G3wF387Hxl8qIejxbmp0tcHJxlb909KLo8JMHj54" + "pw23N1OKePcdP0ghha1oNQwGgKvyc2pwyR02PgzppwJvvSLTjvPU9jU9936UG3GBUhTV" + "Vr_0123456789abdeghjlmnrstuvwBDIKOS", + "0123456789abcde2e1dfghdhfaijekl8mnoe9pmq0dr0s2tuvkowrxe2iyzABC7z7DDE" + "u0dB01FiyG1pHtIe5pJDucBm851Bu4KcucKJ1pzAu06JL7Kc05iye1I0jLLsMKANDh5f" + "9O3CPeKcs2452ExQdfEmFiRSe205e1FRl4SIyP1pIeekk3SIPssz3NPsNqRSSu3NyPu4" + "qTSu_0234568bcefghijlmopqrsuwACDEFGR", + "012345674889397a6bcde467fegbhie4jkl2mhnhnjjioeigpqrf2s2tuvfb8wvxy623" + "zyeAkBhryfmCDEF5GnAtrfHfvlHIl2gbuvaJeA7ajk3K5wLqaJCHgMrgtNOunjbA23PO" + "rf2tb0IEF5fe0te445e4PCQFM07AfeR1fbwpbAEFnOpqOyyoRsscGPOyHIcdPzzSSIPz" + "gbrggb_012356789abdfghikmnopsuzBFJKMQ", + "01234567895a6bcdec1f7g2hbijk676blmnopeqrs8rj12tssuivbiw53r1xwxywytsz" + "3ABA5Bo8u9w5CyDEg1aFyGfzvoHh5aIf42aJKLywMltsLDJNOL7P67PQNk8MRuIKkdfz" + "QHBAGRASTUiIxzBAinQOVnhWCyin896CWETXHBw512vTiIx3txtRCyBJ45P4XM45zMxz" + "wxzM_01246789abdfghikmoqsuvwxyABEGHLU", + "0123450456789a67bcdee9fg7hi7gjklmn5o4pjmcqdrsjtdkdjiiulonv3wvwuvxuyx" + "yqznA8Bf78o22uo2lolCfkf44lg5sybpCD6E67tCtdCDfg1clFawGAE9H16E4pf4drgj" + "E9td63jIgz78ktBggjbpp6fgfkjIA8784pp66373bccqqAfgcqgzhw78bc8hA8GAhwA8" + "_012345689abcdfhijklmnpqrstuvwxyABCDGI", + "012031456789ab1cdefghijk7lmnodg8lpaopq84rs4t420l01umvwoxtyz55ABCxDfg" + "ozAEnFwjGBHrI0JKfaLHz5fg016AMhsF1c8vg8Nu2OyGvw9tdIi2pPQzEuRNgQiIDRmn" + "z6AEi289oxGBozt32OI7lSKSTjGcwjBmEBzIOUUHv2TySPAEI0PV20BmVF0lvw6ASPKS" + "20hvwj3Et3_01256789acefikmnoqswxzCDFIJOQT", + "012034567849abc4defccg0gh09ijka94aklmn9iho7fpnqrsktp1q5uvcfcwf6x8hke" + "y6wyzoy5oAy2fhgnho34k6c4BCtDE04gy3Fyy334fhxG4gs50HgnuI5u20f2j2Anlx0p" + "01E0EJxGCxKj8EjJKjqrxqgn4g6a4a4gabLq1qEJ6aJlEJ2156gnAn1q26f2fh8hfhf2" + "26_01236789bcdefghijlmnoprtuvwxyzABEGHIKL", + "0123456789abcd8ef8ghh0ij5kgh01hljmnhopnqrsteulvofvwxyozABveCjtq4lB45" + 
"5khljm1601nhi50DEnbFq0Ahq4tecu67obcGpHIjvoBvlBJw7KopvozcdJAdDvepLD9b" + "va0DdJKH3isAyMnqJwHNDvBOeCAP7KPIvah0APxaDytMjtvanhlDij01lB1yzAtQAhjt" + "Adh0011667_125789adegjklmnpqruxyzBCGHIKMP", + "01234526789a85bcdefbg4hibcjkl0mn5op2qrstukvwxqixoyzABfCAdBxqDmkE1FGx" + "fbHIsJKltL3MNg236qi1OP8z45MrcQbi5ost3MqPMui11OxOuk6qBffhh0011FOR01C6" + "xqkE3M4CS9deTD7UAnI2CSqj4vJOsJUDxqg47U8578nagpaVWTzm2xpGRX85ORe75ooy" + "deC66MmLDtC6_123458acefhjknoprtuwyzBCEFGHLPTU", + "0123456789abcde0f18g0hi3jkl0manfopqerlo6f1stuhvs8wx5boyp3zauA8mazm3z" + "BbwdzxCkwm4DgmjxEvmB674FGtgHgm9HzmIJnIwrKw89jkHauhabDIzxhLmaL7CMx5wr" + "rn9rk5Nh7t6GkOP6MFQy9qL7DRRyhLFQQJ89opHazmFQ4DJvvsSC2Sborn2KnIc4HTA8" + "IJAUqeAKnIA8899q_01245789adeghijkmnopstACFHJMN", + "01233456789abcdefg2hcij06klmndopi5qr1stuv5qwxy7z8AnBi156jwCD7BzCEiDj" + "i5Fq2378ciGHz2bEIpCDj0B9tJKILzo4BKlGJM34accJ0123HsajhN9a6kv5opLODjGH" + "z21sqwLz23KhhlwxnB7BzCN0ChnOndqrhl4GJxnBqwEvvHxfjw0xndKbdeN0DjBKOPPQ" + "DNPDOPbcndKbBKnBBK_01234689abcegkloprsuvwyzACHLM", + "0121342567898ab5cdef7ghij317k4lm89nofpqrst5156uk368vj3w2usejxdjkyqz5" + "5ohyoAz5qoefejiqBbsCDEynnFbnBbAEstefxshyiGkHmsIB21qr1o1tI8JbKeiqstwb" + "jkmsacvawaIKwbtAL551kHmHdtaxacL5ejjkkCBb1t5q2125b5vljk8v5olmJbmHoD5o" + "I8IBBJJh5q_01245679abcfhijklmnprtuvwyzABCDFGHIJKL", + "0123456789ab5cdefgh2ijgaklmnopn04qmnrstuvrwpxymzjAphBoCD7rEC2FzGo4uj" + "GHHfAxIk9w1iJqKtdEh2jAeLlJvjdeDoiMNsopK6AxwpBob3zG8DLBrs9fO8CDe0IPph" + "JqeKx3zIQOnI01zG2Fh2eLn0GHHfRNLBECBoR4K6tR67o4CDklGk01K61i01wpklDw8D" + "O8sSFTRNDoQOQz8DQO8H_012345689bdfikmpqrstuwyDEFHIKP", + "012345602678796ab6c00dedaefadghijk7bl0babmno5opnqr9sbteu0vw1a0xq5y1z" + "thncAB9f0dcBCjDkxpq6kzvEDFncnll060n5q6qbGHbaaj3ld1fa9tyF8nIjpJthCjCD" + "Ky79xpv1ed78796aIi8n4KJ4mCmIhiJnterLrmmC6CGDLDLCCjyvAeDE60n5d101w1GD" + "rL606aajLGrLCjKH_1345678abcdefghiknopqrstvwxyzBCDEFHIKL", + "01231456789abca7d9e2fghijgkb9almhno8p078qleddrsfbtuv2ia7fg4klmgw7xyz" + "ysjgomAjBb018C8Dpdd9BA6Eiusf3quy23FGnHjg78hiIhJvvsgK569a14xja7hnv5st" + "CLHMpd78jNnHnuiOuvhn1H14vsv5OFxj566EPAnH6g9OFGGDphOvQpIhfgvshnaP0rFG" + "OvnuJPOFG66gRarJrSiOBbJPd97Gx6PxxC0rPx_" + "01346789cdefgijklmnotuwzCDEGIK", + "012345607896a9b99c9ddecfgh9cfiijkflkhmnkmopq1qrgsitruvwxxpvwiywzgh01" + "hAhmBCCDEDs5qFrg9dwxCughncG0BgCH1qIJpqxpKdLMMN2udOc0PQgM8fMRoPfpBgf1" + "CuLMuvSoM7PixQTxun1qncrJcO3KTONcoslUVsc0cfrLb9LWul2uDSOXLMoP8YZmN8Lb" + "CDxQlUV4PQY11qzQqFoPQjjFrLICQjLbHNmVb9M7EDh7hmEhED_" + "012346789abcdefghijlmnopqrsuwxyzBCDEFGHIKLMPQSUWYZ", + "012304567898a48bc5dc6e3f5662c5cg56hgi101g0jgj7kl8m1n1llopjq404qkmrst" + "g0u5c5cgjgj7788mvwxt2n233vphmye3zes9yAb3B1pC4l98DxsEbFvGEz8bHIAJKLKM" + "x504g0HN8mOe6eLPvwEHwQby5BsEyRB2EHMDu7ObPxx5u5u7DccgzIowScTpt6g0tzFA" + "LSPsKT7iNFB1NUTpFAyAby01ikyRnozIpC04CVpC2n62u9Tp7iSuu99OScno_" + "0123456789bcdefghijklmnopqrstuvwxyzABFHIJKMNQRT", + "0123456789a9bacdefgh3fijklm1no9p205qqrs601t5ju7kvwprx2eyhaznv25jAe26" + "a9dq6BCzjDyEFA4ixA45gGxsa5HFFIdJlKw4kl8dvwyklKKonoL8wt4MvwFwNFh4O7v2" + "Oy9qfkdJtdtfBPQwuKefMCqu26wt9qCD4M89RKDSrSbaghFAuKij6Bw4iTvwSoNx4MUN" + "NxAOwtharSx2ghbt266BFAItbaTz7TtfbtTzCzijbaa9aVO7sOWVBPMCWMMC_" + "0123456789abcdefghikmnopqrstuwxyzBCDEGHIJKLMNOPS", + "0123456789a8ba3b1cde9fg6ehiaba89jkblmlbabll00nno1o81pncqrq7r01stujjv" + "hw9c8981txqxrqgywyem5zABvCehdDksEFGgfHstvsIJqKI8txa8qxcqtxAGbastLkks" + "stwj2MtxqxcqqKHK9cyvIJzNKO01JPJ9uj89gya8Q9GgR8BRHKB4Lk9fDGAB89gyd345" + "6STAR6CEB4vCABg0a0NUVgjvlpa8wj89CE9fmlBIGRfHDGB4TMemmlTAjvbllpMb4QQz" + "TMHK_0123456789abcdefghijklmnopqrstuxyzABCFHJKLNOPQRT", + 
"0123456789abcbdeef75gdhggiijdjkcl21cm4n667o7fpqr5s450kt0th75klu3vt01" + "r2cu1w1cpxpyys5s34izl2lbb3ABCBDCwnuEBy3pmFGFeGef2fHzboglb33pIJK8vLfF" + "MINkLOOPIQ1wkll22ffpL0IrQDm4RaEpSRklDA8e0kklMKRaL0K8krabt001boTNo7t0" + "uE1wnoJ9cu0kwnkraUVc75OVNVTN45efr2VQ9fIrd2W5J9fFm4gilzAWQDDAAWoWXYo7" + "YoXYo7_0123456789abcdefghijmnopqstuvwxyABCDEFGIJMOPQRTVY", + "01234526076589abcdbefg7dhijfklgmnjonopp818dqi2gr9g26stuvwxdf6a3yjfab" + "zAvBl6j9yxbmmCeCkln3bDyb3a2326l4ElFhinqutkbDaGi2rCHIDJGvFIkKLw7jAqqM" + "l6NFnj6aaMklinl6FOHPQyPREl26S7TUMmnQ9VNFgmhEi2VraG3a2dQw89nQn33aaGsh" + "onElHPinhishtkGDl45G9g45qMyg7dstAq89AUtk5GK4DBkz4UNsDJqMstkKfMtkdfst" + "kKKTfggrrCfg_0123456789abdefghijklmnoprstuwxzABCDGIJKMNOPQSV", + "012345678958abc4de5fgchijfklmjno8pqrsqrt1u5r0vqrwqwaxbbi6ny6x8izr0la" + "AwBlgA01iCD4uE1up1fFcGkG7ooHFIJKekLFuMar2MlakyHNOLlPqQHR4qmO5ftC6ny6" + "yxxbabarrttCCuSu9SgDqQCR6hDmG5biTMQKOJBluMRESugDoHekgceBvTKv5f7ofFwa" + "abqriCsqqQ5fzRhiUzzRCRVQdgvTiCbiabwaG5kGREEWgDuEDm9SG5Q05fwq01mOqrDm" + "1ugD01cwecdg_0123456789abcdefhijklmnopqstuvwxyzABCEFGJLMOQRSV", + "0123456789a8a2bcdefgehijek5lminmn0oa7o703bplkqrqr323a25a5ssttufuvfkw" + "4x9yyzbzwAs8BnCD5lAEFGEHn029pvIC12gJDpub9yCDKLjMhweh0a7owAoNOPNG29ej" + "zKN8yzij9yAE5a89D5EHMQwADpcRScplTmPUN8qSoNUVQWVKWRpv7oltFOvkekmiOP5s" + "irsO1rRLWROX12MQPUkqOPcKFOsOzKqSScoapv5s5luUoNplFGpvoaltfutP5afggA01" + "70fuuU677001_012345689acdefghijklmnopqrstuvwxyzACEFGHIKLOQRUV", + "01234536151768679abc8d7bef7g8ghcij0k7lgjbcm8gnopqrstiuvuavwxyzdwzABC" + "fDEF3EuqFGH94IrJJK04LEEFMrNOFljxkIqrAqljlPEQjxep9a4RS9hN5hQiPOtSrJLE" + "Lk7llP8iCTvUmyEFkITKPeef6QGPIGVL36WN67S6GPzUavstoX8dNOvuSm9adAmyLE45" + "67WNavPeep15Of7bEFfDxpux1F15pDxpvucBno5bS667bnnobc5boX45xr1715jxcO6Q" + "ljQlcB17ljjx9Q_0123456789abcdfgijmnopqstuvwyzCDEGHIJKLMNPRSTVWX", + "0123455676089abcd4ef1e1gghhijfkldmmnnh6bocpqrphiistd8uvtm7wxd445mnnh" + "xyuzABhiCDAmhEefrD1elxF0088uuzgz1gG1zo01uHgzfIeJ1eafKwLCG1x71gwxG1Kd" + "013MgLtd088ughgzuHNkd4tdnh0BO5BazrNP45O5uzxu3Jk0MILr47Nk01yHsQ1guzgz" + "1gIsMIcRJCsQCDm7y6mn76EcrSQTjM01UNnh0Bk09j01Ba1eeJah1ebc01cRafk0NkJC" + "3JV9kAAmm7PVAm_0123456789bcefghijklmnoprstuvwxyzBCDFGHIMNOPQRSU", + "01234567869a7b676c0ddeefbfghihjkl4hmnopq3rk3okoststuuvwmhmx9iyzABC7z" + "5D1ECtFCGsH2HpypAyzAziih5FIlDJdDtuj2KdFLMNenuv8KvODJDnnobg1Dts7byPJG" + "tuaMLwlKKcuv7zmvnjDnEJ2qvrPqGs1Dcb86677zhm3rzNfHj23QQRuSenefl41E01Jo" + "fHiH1DMNiyihAThmyPAyUFwmk3okJoDJyPk3bgmVTpFJUF4UJG1DwmqQpq01x0m3011D" + "qQwuLwDJWgJGTpGsNTsS_" + "0123456789bcefghijknopqrstuvwxyzABDEGHKLMOQRTUVW", + "0123455006789a9bcd6e8fegghbibjj2egkglk78jkdmnkolpoqrrpstuclefvpwxy2y" + "2gegiec9toz78fA4mBCA5006w645wl9o9b5wD1EpgFbiA4CutGlkFHI0ix4JK1BLyMNF" + "O56PJ0Qsst8401xyegct4RkNlkSQpww65wwltGE9QsCQeTpwEp9begc9CAGURbL3yM50" + "tG78A4rp3M5wwllkkN8fL3ctLNVWqrkNlkRVwlCAzC5wERrpO5ucuEERRVsppocdVWon" + "nXonucpoCuuccddmcducOI_" + "0123456789abcdefghijklnopqtuvwxyzACDEFGHKMNOQSVW", + "0123456789abcde99cfg2hiajidk7l1mcdfn1neocpoqrnsjqt7ruavs9wiaa0x0xyy1" + "ezABp0Cg5wjisDEFiE5iG6w6eoHCbCmIJFBeyKbfuxB8eoe99wwppLdLjiEb1mzuBMy1" + "NFL15GLr85LOuxbfoqoccppL9cfgPIrQmILrLORSv45iAvdLB8cxQRkONHfnATqdqU6b" + "nVoqUWOmeo5GlQkO1nv4HlNHdkGEdy67lQQRcd2G895Gv8vswpUyv885425w9wB85Gv4" + "2GG6dyAv9c2hv4w6wpcdcpp7_" + "012456789abcdefghijkmnpqrstvwxyzAFGIJKLMOQRSUVW", + "01233456170181489aabaccdefgfhgihcicj7kdl6mn5cdonpqhrstihpuk6vowqb440" + "i0nxyedldyyoozAz3jBCqDjyEFFGenCxrszxhrefHcIJvK177kgLJvMha3vKkNhgJyj0" + "gLr70181tuqO2I7PAzEF1fQrbMBLf534abbMmRST9Hj0gU5Vcj17sttNWgGA1fjyWBVO" + 
"XTHEEJX7Lp9aAC17BLr71fgUloyef51fHcn5lB5V48xVBLLpFlf5XTCxUkUp8X48rscj" + "rUUp3jlo347k3jACacQrQSQr_" + "0123456789abcdefghijkmnoprstuvxzACDEGHIKLMNOQSUV", + "012345671889844abcdef6egg1ahijjkka9l7mcn89o9181bbcc50p0qrps1tuvwxbxy" + "zsAtB6nC9aa7676DlDcnEFGiwHIJaIfKhLesp884p80p011bbcMuHMFNhmxbuh96tlMu" + "HtqOPvrQ0ppRgiGxSynC0p0qy33TeGqogw1Axyo9rpvwyU7mgisSTVvq9aBTpoiFoWah" + "o9qOhLrpy33Xxy4aFc96NnB6aInIdP96TVzr2Bc5S2sSs1IJ1brQ1ALY5VAtnINnc501" + "hLGxT71bA9lh96deB601hL_" + "0123456789abcdefghijklmnopqrsuvxyBCDEFGHJLMNOPQTVXY", + "0102345673689773a7abcbdcdeeffgg4h0ijkljmnopqree0rst94juavrwxyzA61ngB" + "Cu5tD5EFqGomabb3fHcb5tIEtJbKdcEqu8b3AL87MKqA73NlAucbEDnors97t9sOP801" + "QRQ0011nSD87rh97ucTQlTlPomu8OH24KUhV9xNC69CuAuQ002ALG6sOyL244jh0Labg" + "Q0vdb3bgcfgBsVxWHBBXWi9xKiabdcyLpqL9gBbgyLxWgBab9xR3Fy3iKiR3QRyLvdLw" + "87fHD5zwt95tt9a79xwxxWD5SD_" + "012345789abcdefghijlmnoprstvwxyzBDFIJKLNQRSTUVWX", + "012345657218891a18282bbcde7ffdegch71ijklmdind4op45p2qrsotn2bbc8c8u3u" + "o7b4vw5wx59hy11871oizAsBvCfdDEuE3uF3FGnHinuI3xifAJhCwKfbLxBM7fjb8ufd" + "MNevvwO7Iwke45rgxIjLmkLxx5tmF3OP45Q3IRSTSqUOmk4vVz9h0SWicIacH4NkNXNa" + "evT9ijWizAOP9hyYMNVshRjHzopjFGacAJ3xByApRKpjcIERxIyYJFRKjHTDsBuIST8u" + "ByyNYqH418ZSFGYXYqXlSTByTDsBDE4v_" + "0123456789abcdefghilmnoprstuvwxyACDEFGIJKLMOPRTVWXY", + "01023456789abcdefbghijkj7h7lmn8o78p8q0q778rs8iimomdodftfpt1u3dmc2rv0" + "wxyzxABzCypdp8DEFGHuIgJKjLMKKpDlcN01gh02sO8Pbc8iOQQRliq7JKASvBDEqDC3" + "vT08BzvB2rK3UVfbUDv0uaMqr61oD2mc9WUVmOim2i2uus6Lxsijwr49T2r6i6cGnGmn" + "jcwr08lkkjp8MKWFIp2usOjc8ieXD2YLUDi6MIqDcN6Lob9aaS9a1o49ONCBCyCBB1q7" + "1ooW8o788oobsOK3eWq7oW8oZqq7788obFoW_" + "0123456789abcefghijklmnopqrstvwxyzAEFGJKLMNOQRTVXY", + "01231456789a3bcdbefgf33hich0hj8ikl9kbjgh0mh0hjmnopmog9788qrqs7ntnu4u" + "dvcdic8i7ffgghhjjmmw0mx1mw01gyzpgh0lABwp6CDzCE2FFGaHx1GEsIFbJKqLMKN5" + "NfoJalLOykPMcx148gghtQHzAalR0m8qbB0114bjjHHoLk4Miy78GexSkloJeTGev4JK" + "SR4uuUnJFGFCTw6AKV9ag99aLk4uMKFbwtRMhjOWBHAalR8gghh0y0iyv47falCBOS6A" + "lRdvcdBHoJWPHo0mPMHzoJMKKVmwMKPMxSWPyx0my0yx_" + "0123456789abcdefghijklmnopqrstuvwxBCDEGJKMOPRVW", + "01234567389a9bcdae1f4c7gghijkllm6k3nop7o7gpqpcc959h5rnrsqrqtuh1jvwvx" + "9ay6ozABrsraaC5DyEEFBcG1BGHI7wJKdfcd8L6IM3oBANFusOEvOPkApqfe3nrnPKfQ" + "EFFggpop7o67y6pqLPwBRSqtMTRUhqIwN0G1zMh559CJ6k8Qy6VTrsqrW5klw4MTlN45" + "bC59dfzGnLyEfQ9avw7oSDDbzMaeN0pqbCFuSDraz2sOuR2rz2rsozFuCJeJklopEvBG" + "vweXwBcd4chUcdpqqrFxeJoB7ohqqrgh7gghrn3nM33n_" + "0123456789abdefghijklnopqrstuvwxyABCEFHIKOPQRSTV", + "0123456789ab1cdeead6f6gfhg8g9i9dd6dejklmn4elolopamqarstuie9i9dv8elwx" + "v0iyolo5yzAo01Bvv0CrCDEAFGiycy1c0109mk2v5j9d45HIsgieJcKnGLGMgdELMwxb" + "o5hNwz5OsPQCbtLo3sH05jn4sERtjk1cA4RSyzQBcTEArsmkhgamkuGLdeOkU7btrV1c" + "NWzbel7OgdyzIEOkTzU7kubtGMRtmtVNlmmttuIGCr3IQCzbFGJXIEAosEbtrstuCrzb" + "6ld6RtQC6lELolAoLqqaLqTz9d099dELsEEAAooppRop_" + "012356789abcdefghijklmopqrstuvxyzABCEFGHJKMQRSUV", + "0123456789a5bccd9efghijh5j52kl3lgmnopfqrstubmq4vfg5ghq9gw9hxy0zAABeC" + "fDfg4f9giBiE8f9edFaGbcH601Irv7GJjhEs4vzyJKBLM8gzbN7Ov7v22z3ihiGjdGPp" + "caxEcdiQMw89bcN4dR4fpS9ea9aGGJeCiBzA23CqBLTKNaB1UHvDl0VUBLP8uVKxSDqB" + "DzB1zycdCq7keCVP9gFewFxELtPpmAmqrLRnP8TKLtSWIrSDqrvDMwN4oIv2HvmqIr23" + "rLemFn4v3iiEGeELrLM867EsIroIaGMw7kwFGJnoMw3i3l3ikl_" + "12345678abcdefghijklmnoprstuvwxyABCDEFIKLMORTUW", + "011230456783899abc9dcef4g85h454d010hhij0ik301i83l101m9300hno7pqrrsks" + "ikhihtut7kv0wq3dxyzfiAB8CvellDE3Bm3dduobqrFG5hhiDibcf9awHwwq89vl3dlI" + "6tGxmJKCf4LFMNOyzmPaeQFrjMRBC3nS3delnocv9dv5celDFGJTdhSz9a565D6Faw83" + 
"el30eQobFrJaDUEjutI2zVbC45IUVWQNnS0183no3dQInRI2f4SzE33d5DgEogau4512" + "dukOC4VPUppx0145PXX6SBtk89gEzVUpPX9aEj67zfkOcj7pfPdhpO5h7p5DdhDU675D" + "_0123456789abcdefghijklmnopqrstuwxyzABCGHINPQRTUW", + "0123456789a8b51cdef6ghi2j67klmef4n9o31a4pl5pp6qrst5f3euvwvxw20sy6zzA" + "zBkC0Dmk0E4FG7HpIu5pJiwzAKxwLMxuiNplOAp6PKyQrRrzwzMmvAN3xwExbeJsHpST" + "CThC8HRhgkdEolBKUazB45l7a44FcuFqLVrRlmBCwzxwybPSWGbeQHGRex6zHoHppl4F" + "gh1xzBwzcXp6Jia4tdDI6z7BbeuvxuyQ67WGBKyb010DXOi2be20DcqrQHl7exlmi29W" + "OPdE0DnWxu4nuvvAHooM20cXHoPSQHl7zAhSnWi24FrzqrJi1qi2201c31N3UNN33F1c" + "31010D_0123456789bcdefghijklmortuvwxyzADEFIJKLMOPRSTUWX", + "012345467890a3bc1dde62fghijk23l9622mmnondo5d0590945p4546622mmqqrnron" + "s6tuobv7w0wjj1f878xrywfbez05A2nrBCondoDEFp8GHhmqFv6p62tB1dsA1IdeJoCK" + "LlkMNz8DhsjkzcMOpo5pu6LHvPQRsAv7AvrE01f8Nz1I45pmcSIN055pBApmT4iUvPwi" + "LyHtMfLHywVO45mqOgPqVOqrlTezFpgQrEmn1IRWiU2mKPK3015dzcezde5d3qfggc5p" + "UJmnPxAv6ppou6Ht05JfGRxGobGR7nQRBCkVTuv7PxtB45vPRWUJQRgQAvbcBCobpoHt" + "jUHTfgf8wjFp_0123456789acdefghjklmnopqrstuvwxyzABDEHKLNPRTUVW", + }, + + full_with_errors{ + "01231245016274787445", + "012345605660780112233923", + "012345624785184559851801", + "0123456789a8b7cd14c2deef", + "0120324567859a294bcdeadecd", + "012341562775389a3b56b7ab75", + "0123451267849823508445845001", + "012345678598a998ba8508345c40", + "01203134015617206892200131345617", + "0123344556789abc4a9d02ebf47g277g", + "01234526756869844ab0ac450168844a", + "01234356789aa006b6bcd8d1efgfc1hf9c", + "0123456727386790ab5c039dce2390ef3gc9h1", + "0123456771508219017a6bc756dc45ecc7561f", + "0123345627389a07bacdea3f1g7ach2i072329g9", + "012345627895a6bc62d7ef9g7e8gd515b1bhhibcbh", + "012345674898a2191bcd5efghbg8i01b010f181b6b", + "0121233445260617016869ab0c68deef383g34012g1a", + "011234536789784a349bc667c67889539b78dc6725c667", + "01230456677896a9717b010c9d67ced06f0gdh0c0i565jce", + "012314505675566389a5b6cb6d2e1f2102g2501f1hb6b502216i1hcba52e", + "0123456789abcd1e0a9fgh3e23ijk7hj0295k44i8l0mndoiopbmpfobqeefqe", + "0123421565789115654a6b63ac206d3ef9gh42i4cj633ee020424aaccj07ka8f9101" + "4220424a", + "012324567879149ab7c4de805f7c6g5d5h8ijklmb55lln20op5o24pe56lm8ebqpeop" + "8rb479638g78", + "01234526076879aba2cdeb2fab0gfhijkj0la15m0g0d3n15o05pfd1q4f1n0l151q5m" + "2m262pc92nng4rs9c9", + "0123456789822ababccdd4efg82hdi9j2aaceck5l04m9445cnbo0pl02q2a2qbcba2r" + "cdbodi0s2aall0j09j89d1acd44md12rcn0s", + "0112134567589a6b2c37a0d98e9a6401fg120h67a7a0d945649a45id67d99aa05801" + "a0ji45642c376737idk19ajid9ej133767k19aa0640h4558", + }, + + partial_with_errors{ + "01234125_03", + "01230454_125", + "01234051_012", + "01234516_025", + "0112013450_023", + "0112344526_045", + "0123241567_037", + "012034155601_024", + "0120342567_02456", + "012334451623_0236", + "012345166789_5083", + "011230450601_12345", + "01123245657387_014", + "01203456728768_135", + "01230430456758_126", + "01231245640170_350", + "01231453678445_027", + "01234115567478_024", + "01234156473889_025", + "01234251678472_036", + "01123405010526_0135", + "01123456756056_1236", + "01230425366782_4025", + "01234056763843_1358", + "01234205067258_1246", + "0123451657896a_3904", + "01234567388978_1256", + "01203456728720_01235", + "0120345627486449_1359", + "0123403501160167_0245", + "01234156370889a5_1246", + "01234250657889a4_7163", + "01234506727894a0_0356", + "0112304536300112_01234", + "0120345657530118_12367", + "01234250676017_1234567", + "0123456753282001_12478", + "011230456787925aa2_0346", + "012334056789a75bbc_1269", + "011234506501501201_12456", + 
"011234563725785669_03467", + "012340510160678798a9_135", + "012345145637230182_01247", + "012334562370528796a9_1245", + "012340563738239223_0123468", + "01234056078498a9408498_1367", + "01234156622768969a7b4c_0258", + "01234235670482592335_0124568", + "01123456017809a5bc7d_023567cd", + "01234561748695a070b7_1256789b", + "01234567538968a52353_0124579a", + "0123300114565723825957a9_12367", + "0123456708590a06b7081c_012478b", + "01233425462738691a342338_01345689", + "012345465745808557469646_01345678", + "012345678497a6bc62672dc0_013579cd", + "012304022561782927ab1773c9_134568b", + "01231405617849a79aa749140178_01268", + "012342352356678056893580011a_12469", + "01123456473468560180930112_12356789", + "012324561785799801a0798501_01234567", + "01234516174645581645947994_01235789", + "011223454678298a78b2452c97_b70893265", + "01234256758914a5177517b65614_0235679", + "0123454267389a7bbcd0e6677f87_01256ae", + "0123451678961601a0026496b878_0125789a", + "0102234552267869a54b522cc669_1234679bc", + "0120134567740145389a137b73bc7b_024579a", + "0123456789abc67a8d38ded494f9gf_12479be", + "0123456783791ab7acdb3ae3f4_0134678abcdf", + "011213456789a468b6cbdefgehhi7jgk_12569df", + "011231405673839ab701a6c7d773_012356789bd", + "012334356748949680a9bc964d8efe_1235679ab", + "0123410156570898ab5ccb697d575c_01235789bd", + "01234560789ab9cd96bed9fgeh9iifhjfg_1357acg", + "0123455667144556148945901401144590_01234678", + "0102345675289a7bcdefgd8gghdiijd3fkdi_146adeg", + "0123450678890ab3ac64de4f2g9h06ig74_03567abdfg", + "0123455001657872951a5bc7dec665c6efbggh_13479d", + "011234256217289a122bc9c246de2bfc6cgf_12458abdf", + "01203456726849a38a9ba3cd68e73456dee749_13457ce", + "011234015675289a56126001a7609a12562875_01246789", + "01213456789615a3bcdefafg9h1ijd1klj5dgm_12458cei", + "012345567892a9b0922cd596564ed0fdd4b0_13489abdef", + "012345627896a9bcd41ea00fcg01ghfi0dj164_035679bf", + "012334567819ab9ccd2ce2d5c7199defg9fhi12c_0268bcd", + "01234562785695abc26256231962a6a99519_013456789bc", + "0123456780629abcd8eb8fgeb93hh1fb8f1ibccj_1257acd", + "0123451637589abcd22e625fgbb11662chhib1_1f0ebg4379", + "01203456758087597515a3ba5c4da31e34154fa3_e8ba61930", + "0123214556789ab1c8dae8f0gf6hijbjg7k66hlb_013479chi", + "012345678298a71b4cdaef9gdhhiijkdldhelmm6lm_0356fkm", + "01230445167528955ab9cde05875af8gh002hb8a_0d537g4h29", + "0123245671189840a4b9c8c23d9723c2c88e3d_012345789acd", + "0123425431671859a0010bbcc5c2599dde5953_012345789abc", + "012034567890ab627c56c90d5e7ca7f0200d4gh7ihgj_12358bd", + "0123433567789a2362b61c7d9e62a7f9e59a9g239a_7d23af805", + "0123456478196abcd46481aef69619gf021h01781h_0a35d9h7c", + "012324456782249a81a8b5bc814b50015082a89aa882_012456789bc", + "0123450678969a62bcda96e9f0064gghi0b7jdkeliheedlm_13567ac", + "01234560780795ab0c9adbec3f1gc1711hfij13jkil3ml_12458aegh", + "01234567869a1b34c219bd6ef66g9h2i9fb99ff64jfhha_01247acegh", + "01023456784859a11bcdefegh7igcjkclhcdmj8n7knddond_1368adfjk", + "01234567589ab9cdef4b813g8hi2j4ik23kl8elmneo8napa_1347adfgh", + "0123456478049abc5de9df2ghigj4kblemm5hnl6jee95o6pl6_13457aci", + "012334536741389a1b1c9deff0011bgbbhgbijc92g23_012345789abcefgi", + "012314451678592ab66cdefghi4hj71d841k6lgkk5k6mkgnmoop_0348befil", + "0123454678690aaba23cdefdegchi7dej9ec2ekl69j35i5m4645_1258bdfjl", + "0112234056017812154049407a12012b1256152c2301404d4e_1234689abcde", + "01213435672890a97bcdede4cfghg3hdaijhhckh74l3immnaikopo_0146cefhm", + "0123244556789a8b7c6de85a8fegghi9d1jkljfmnoblmpiqrbosqt_1368abekn", + "012342567890a85bcdefbg1ah7eijklgm9nlo1lgmpkqp6orqsd88q_13678cfjl", + 
"01234567089a7b01cd8ef984cg1hi0hjk8jl011hkmnghj08o0mcop_13568adik", + "012324567895158a565b23c935d72378e95fg295he205bei_0123456789abcdei", + "01234526789ab4cde8fgghi6d778bc02c0cd3j1kd7hlkm23ano2nppq_035679cfj", + "0123345647892ab8cde05e71fcg5h2ihbe1j7k070l5emfi5ihh2n5nbb8nb_" + "1269adk", + "012340567839abc0d2e4dffgheih7j9kle9cmno95mploqrpm9s5rtsuto_" + "123468ahn", + "012345670869abc8defefghid4cjk0266lk6hm67demnoneplqdea545d4_" + "13578bdfi", + "01234526231789a87b1cd74ef0g6h7gi0j3kiljm177nlmob3pilq33hporn_" + "03569cdfp", + "01234534567584790a47b0b2c723de5f755gc775ch0cc73i755gj9_" + "62abgf951483hec7", + "012345627489abc8d6efcdghi0f58jk5jlhmnmond6pdiqdrrsqrdarbpdpt9b_" + "1359bcegp", + "012340566789abcd7e78f7g6hg4878iccj67dk3l7m3dmn4opqg6nqro5sg5806864kp" + "t7uk7m67t7_12359bdefghjkoqst", + "010234156789a87bcdea2fga4h3ijk1j34lmn3fop1n6q002irsn2kkm1jn61skg34l4" + "4iklti6tl4jkjl4i_0136789bdefhikmpqr", + "0123456789abcdb21ebfg8hfg7iji7khfljbfmnobfj0lcpqojj023ra01sn3t1unacd" + "hmnomlvmlcionrahrkhmoa_13579adefhijkmnqsu", + "01234546404667467801091a407b0167c4d2407b1ebf467b67017b46c41g0h40bijc" + "c44001jk40bl467b6746407b0hbf_1346789abcefghl", + "0123425678592ab2accdefg0hgg9ij3kfl2h3chkhgl9bmnopbqpmrh30s0ikia3fn4h" + "rcbmqetrrumaq4arpba3fv4hvhh3vha3hkvll556l5_01346789bdejkmorsu", + "012345677897abcde0bf6g75hi5jk16l7mncopmh5omb4eqrgnl567sl6gstukvwjxy6" + "bzlbwj5wbfrABA7Cs5s4lbslwDrdqrtqtee001te_01345679bcdfghjloqstvyz", + "01023456789a0b8cdefgghihjklmn2i2opbqhrnstutv2tgb5uuwbxeyzd85Ao5uiBr3" + "56rttudehArtCDEp2lr3DFghlw0bwFwp0jc6zc3Ec6AlbA_" + "1246789dfghijmnopqtuwxzC", + "012345672893abc8d2efdg6hi0jkfklmnop74qrjdsctsu2n67vpfwn6bncbvxyw95zy" + "36a2A96kBreyC6Bvtzqvue932unf2nDap7mE367101p761_" + "01256789bcefgjklmopstvwA", + "01234567894abcda3e9f3ga6h5ai8bjhk7l2mkkn0opqrstmeu7v4ak71wx7w4yw4a3z" + "23uveuA23xh5sB57BC7Do22EszAFx7Gn3xnHIHJBIubByo3z23A28b899kmk8b_" + "012345678acdgijklnprzBGJ", + "011203242567879aabcad9efg5hddif67jiklm1bno6b2pbpab9a90q6cfa6rsjnbj87" + "68hdhccfthhdth1burf6c9dief25cfkgc9d98v12c9a125ufqwcfw8urswc9d9uf7xf6" + "a6a1q667qwswsysww8_01235789bcdeghijklmnpqstvwx", + "0123456789ab5c9d9efg5hijkablmnlog6pq0rstuvlw4xo0esyfhoz7e2ABpehCcCk4" + "wD01xlrgdAo0be23CBE9d2ukEF2y3GyHtHAB9e5cd2hoBIpqqJKLLiE9lolw0riwMN9e" + "w00rCNiwLilohCxhaxrOCNO6xhABvarO_1345678abcdeghilmoqtvwABCEKMO", + "0123423516789abcdefghijklm9nofphqrset4ujivwxg24yzbxk23fz6ABChifgDsEc" + "g2fFscGtzdvlHI4259Jh4qKGLvMiHNcOoP0Q3523PDEcexOiDQ59d6Fs42GtjL23gEtg" + "sekn8DxkFs9Rzd59Qw35wSFEgE8FQwkn23DQQwwx_" + "023457abefgikmnoprsvwyzCDEFIKQ", + "01234567897a9bc05defghdifjk7lmnojpfqrkisatukvwmxqy6gy3nhfqbz7arAqyxB" + "By0CtDdEFGhHy35IDJluj2KL2K6bBI86HDr4MNhGsJOHBOwPwu1QEDRcwr6701u6LJhH" + "wuatc0MSO0t1lukBLTRcc0z0URHLRcPfHDOHDQHDDJr4HDwPmU_" + "02345689acdefhijklmorwyEFHJLNPQR", + "012345367898ab1cdebdf4ghijkj0kc7lmm0n001kop04qrs5tquuvnwj9xmcq1y2gpz" + "hAmaBszr7845wibcc7bcCDghyEl20F78EGlm2gH8IJgfghKLLIMBLphAaNAHOE988PIw" + "QRwhhSNdpzz1deEQLnwh1ynweuwhmahSSTsEEQLphSwhxmKLyUdU_" + "01234789befghiklmnpqrsuxzADJKOR", + "01023456789abcdef8dg2chiaejhk4lkmbn9oplqr3slitbuvwaxw4ywzABqCrDltrEF" + "GBlHmx9mhIDvxsDBr3k43J7KLhMEhihoNAOBdIKNmPkNoQQlNRaoaxHNIDlqsSQTT3UH" + "xVWHuUIMXbX2HzitdIMvxQiMDlvkMvr3trrw0iQTT301bQQlPVr3_" + "1345789ceghiklnoqstuwxzFHIJLMRVW", + "0123415617897abcdef9c8ghi2jklmlnmopq6rs1gt0m3nueviwxym23525zno2loAwB" + "iCuDvf86no6E56yFGEsHtu6ysjgt52I43nJK9sJHLiMINOPMEp56gbOJ90u5i2fIvfm7" + "IQuD86JK8R6ElmClRJpqSTUS86lm89qSi9vii9jUc8sjhcvVIsc8_" + "0123456789abdehiklmnoqsxzCDEGKNT", + 
"01234567689abcdefgghij0k1lmnobhpqrjs2fctcusvwlhxly3z3guwABwl8CCDu2Eu" + "fFtat22fghGmuwyHlyIjw3gJnIKy3gIqLMmnNau2OuOmkBPw5OghNLnIjsIjn3QINQbc" + "CNta2f8CCNfFAl45ctOP1RwlP1tauwqrrxNaOPcutauw5E2fScSTRq_" + "012479abdfghjlmnorstvwxyBDEFLO", + "010234567893abcd172eefgheidjk9lbmha21nbo2epg7kqr53013sk9931tuaniiviw" + "eirexyqra2zpokAzrBt8eCvD02qzEhivyFGwtxniE5Huf9FI01vD1tuaarBmJAGyKfnG" + "rBLMbKf901txN0pEOBCP2nBCP6tG64LICv9Q2of9vL4RCvboAbbooftxxStxf9_" + "0124689abceghilmqsuwxyBDFGLOQ", + "01234567829abcd99aefg4hijk5klmab1nokpll65qrstuv3fpgwpl3x0ybzAhi49ahi" + "Bb6BC816g4ab01DzBbACE2FteFivlmv3GwHIC83c3qFt45Jjm74jdG8KILAhefxMaoNH" + "O0d9fpplGwtdPad9fp9JlmbzNfHIDxyPKQJofAPaAhNHaogwR9KDDxok8KNfh9GJfAAh" + "9JJjfA6Bl8PBuGplfpPapl9a_1234578acdegijkmnpqsuwxzABFILM", + "01234567898aabc9dc0edfag3hijklml3nonjpnenqqro00kestuneivsues2wxyn9zA" + "BCc9o0yDklpEFGCH0kkso0sxdc2oIJbIbKhqneo0Etab9b89c96ihaf1LEkllMtMrz89" + "38Ffv0ijf1qrJNHDmOc11lqP1Bfmf1ivvfQRBKksnq1Bf1bKrKfmGmqPc9BK7vKJvLxy" + "MHhS3nmOIJNTR2233hbKhqR2OMyNBCCyMH3nHD9bqrrxUOVDLk7vQRqrhqksonR77oGU" + "9B7vonJN1BpUIJGUOMMHOM_" + "01234789abcdfghijklmnopqrstuxyzACDFGHIJLMOPQSTU", + } + +{} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp new file mode 100644 index 0000000000..0a2c2aaed8 --- /dev/null +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp @@ -0,0 +1,78 @@ +#ifndef _TKET_TESTS_TokenSwapping_Data_FixedSwapSequences_H_ +#define _TKET_TESTS_TokenSwapping_Data_FixedSwapSequences_H_ + +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Random problems are easy to generate, but good ones maybe not so easy. + * Another way to generate possibly better problems is to take, first, + * a random problem and solve it with a reasonable TSA. + * We then record the sequence of swaps generated, + * and use the sequence to GENERATE a problem + * (taking the graph to have only edges which arise from mentioned vertex + * swaps and no others, and taking the desired vertex mapping to be + * exactly that which arises from performing the swaps). + * We then solve this new problem with our TSA and compare the number of swaps. + * + * This has the benefit of providing a solution to compare against + * (the original swaps), which is also perhaps quite hard to improve upon, + * because at least one other TSA did not do any better. + * + * Of course it is not a direct comparison of our TSA with others, because + * + * (1): We obtained these swap sequences by removing unused edges in a + * returned solution. This actually changes the problem, so it is possible that + * the returned solution would change if presented with this new problem. + * (Although it would be most elegant mathematically if this did not + * occur, it seems hard to enforce it in an algorithm. There is not much benefit + * in doing so, so it seems unlikely that it would arise "by chance". Even if it + * did, proving that such a property did hold would be hard). + * + * (2): Vertex relabelling also changes the problem, even though it is + * "isomorphic". It seems very unlikely that an algorithm would always return + * isomorphic solutions to isomorphic problems. (Even if it were an + * optimal algorithm, the solutions may not be unique even up to isomorphism). + * + * (3): The TSA may be non-deterministic, due to RNGs. 
(Our algorithm is + * deterministic, however, since we deliberately set all RNGs to a default seed + * before use). + * + * (4): The returned swaps have already been run through SwapListOptimiser + * passes to reduce them. + */ +struct FixedSwapSequences { + /* Encoding swap sequences as strings rather than inside a vector + * should give smaller C++ and .obj files. + * For convenience, the vertex numbers in each problem should be + * {0,1,2,...,n} with no gaps. Also for convenience, sorted by string length; + * shorter strings are usually "simpler" problems. + * + * "Full" sequences came from problems where every vertex had a token. + * "Partial" sequences came from problems where only some vertices had a token. + * Thus, the vertices which did initially have tokens are also specified; + * for a fair test, this is essential as it may enable reductions + * which would be invalid in the "full" case. + * + * Note that some sequences currently give errors with the best TSA. + * It is due to disconnected architectures, which can cause errors + * (although not always). This is a bug which should be fixed, although + * every architecture we use in practice should be connected. + */ + + std::vector full; + std::vector partial; + std::vector full_with_errors; + std::vector partial_with_errors; + + /** Upon construction, the fixed sequences will all be set. */ + FixedSwapSequences(); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp new file mode 100644 index 0000000000..d1dfc8ce98 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp @@ -0,0 +1,26 @@ +#include "NeighboursFromEdges.hpp" + +#include + +; + +namespace tket { +namespace tsa_internal { +namespace tests { + +NeighboursFromEdges::NeighboursFromEdges() {} + +void NeighboursFromEdges::add_edge(const Swap& edge) { + m_cached_neighbours[edge.first].insert(edge.second); + m_cached_neighbours[edge.second].insert(edge.first); +} + +const std::vector& NeighboursFromEdges::operator()(size_t vertex) { + const auto& neighbours_set = m_cached_neighbours[vertex]; + m_neighbours_storage = {neighbours_set.cbegin(), neighbours_set.cend()}; + return m_neighbours_storage; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp new file mode 100644 index 0000000000..dc9c17bd75 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp @@ -0,0 +1,51 @@ +#ifndef _TKET_TESTS_TokenSwapping_TableLookup_NeighboursFromEdges_H_ +#define _TKET_TESTS_TokenSwapping_TableLookup_NeighboursFromEdges_H_ + +#include + +#include "TokenSwapping/NeighboursInterface.hpp" +#include "TokenSwapping/TSAUtils/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Simply take a collection of swaps (or edges) and construct the neighbours + * data. */ +class NeighboursFromEdges : public NeighboursInterface { + public: + NeighboursFromEdges(); + + template + explicit NeighboursFromEdges(const SwapContainer& edges); + + /** Add the edges one-by-one if desired. + * @param edge An edge which you know is present in the graph. + */ + void add_edge(const Swap& edge); + + /** The caller must not call this too soon, before "add_edge" calls are + * completed.
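+ *
+ * A minimal usage sketch (the vertex numbers are hypothetical; get_swap is
+ * the helper from SwapFunctions.hpp, as used elsewhere in these tests):
+ *
+ *   NeighboursFromEdges neighbours;
+ *   neighbours.add_edge(get_swap(0, 1));
+ *   neighbours.add_edge(get_swap(1, 2));
+ *   // Only query once all edges have been added:
+ *   const auto& adjacent_to_1 = neighbours(1);
+ *   // adjacent_to_1 now contains {0, 2}, in ascending order, because the
+ *   // neighbours are copied out of an internal std::set.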
+ * @param vertex A vertex in the graph + * @return All other vertices adjecent to the vertex (stored internally). + */ + virtual const std::vector& operator()(size_t vertex) override; + + private: + /** The key is the vertex, the value is the list of neighbours. */ + std::map> m_cached_neighbours; + + std::vector m_neighbours_storage; +}; + +template +NeighboursFromEdges::NeighboursFromEdges(const SwapContainer& edges) { + for (const Swap& edge : edges) { + add_edge(edge); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp new file mode 100644 index 0000000000..9c84b236c3 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp @@ -0,0 +1,56 @@ +#include "PermutationTestUtils.hpp" + +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +std::array PermutationTestUtils::get_end_tokens_for_permutation( + unsigned permutation_hash) { + REQUIRE(permutation_hash >= 2); + std::vector digits; + { + unsigned perm_hash_copy = permutation_hash; + while (perm_hash_copy != 0) { + digits.push_back(perm_hash_copy % 10); + perm_hash_copy /= 10; + } + REQUIRE(!digits.empty()); + REQUIRE(std::is_sorted(digits.cbegin(), digits.cend())); + REQUIRE(digits[0] >= 2); + std::reverse(digits.begin(), digits.end()); + } + unsigned cycle_start_v = 0; + std::array tokens; + // No significance to 9999, just a number>5 which stands out + tokens.fill(9999); + for (unsigned cycle_length : digits) { + // We want to enact the cycle (a,b,c,d). Thus a->b, etc. is the vertex + // mapping. Now "tokens" represents what happens IF the vertex mapping is + // applied to [0,1,2,...]. Thus, whatever was INITIALLY at vertex "a" (the + // number "a" itself) should end up at "b", i.e. tokens[b] == a. + for (unsigned ii = 0; ii < cycle_length; ++ii) { + const unsigned source_v = cycle_start_v + ii; + const unsigned target_v = cycle_start_v + ((ii + 1) % cycle_length); + REQUIRE(source_v != target_v); + REQUIRE(source_v <= 5); + REQUIRE(target_v <= 5); + tokens[target_v] = source_v; + } + cycle_start_v += cycle_length; + } + REQUIRE(cycle_start_v <= 6); + for (unsigned ii = cycle_start_v; ii < 6; ++ii) { + tokens[ii] = ii; + } + for (unsigned tok : tokens) { + REQUIRE(tok < 6); + } + return tokens; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp new file mode 100644 index 0000000000..73c028194b --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp @@ -0,0 +1,27 @@ +#ifndef _TKET_TESTS_TokenSwapping_TableLookup_PermutationTestUtils_H_ +#define _TKET_TESTS_TokenSwapping_TableLookup_PermutationTestUtils_H_ + +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +// See CanonicalRelabelling.hpp for an explanation of the "permutation hash". + +struct PermutationTestUtils { + /** Given a permutation hash, return the final tokens after performing that + * mapping on the vertices 0,1,2,...,5 in the canonical way. + * @param permutation_hash A decimal number representing a permutation on + * {0,1,...,5}. + * @return The numbers {0,1,2,...,5} giving the final tokens, if we perform + * the permutation, with each start token label equalling the vertex label. 
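+ *
+ * Worked example (illustrative only): permutation_hash 32 denotes the cycles
+ * (0 1 2)(3 4) with vertex 5 fixed, so 0->1, 1->2, 2->0 and 3->4, 4->3.
+ * The returned array is then {2, 0, 1, 4, 3, 5}: entry i is the start vertex
+ * of the token which ends up at vertex i.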
+ */ + static std::array get_end_tokens_for_permutation( + unsigned permutation_hash); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp new file mode 100644 index 0000000000..21c8256a7c --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp @@ -0,0 +1,142 @@ +#include "SwapSequenceReductionTester.hpp" + +#include + +#include "NeighboursFromEdges.hpp" +#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" +#include "TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp" +#include "TokenSwapping/TableLookup/VertexMapResizing.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +static void reduce_sequence( + const vector& swaps, const VertexMapping& vertex_mapping, + NeighboursFromEdges& neighbours, SwapList& raw_swap_list, + SwapListOptimiser& general_optimiser, + const SwapSequenceReductionTester::Options& options) { + REQUIRE(!swaps.empty()); + + VertexMapResizing map_resizing(neighbours); + SwapListTableOptimiser table_optimiser; + SwapListSegmentOptimiser& segment_optimiser = + table_optimiser.get_segment_optimiser(); + raw_swap_list.clear(); + for (const auto& swap : swaps) { + raw_swap_list.push_back(swap); + } + std::set vertices_with_tokens; + for (const auto& entry : vertex_mapping) { + vertices_with_tokens.insert(entry.first); + } + + if (options.optimise_initial_segment_only) { + general_optimiser.optimise_pass_with_frontward_travel(raw_swap_list); + if (!raw_swap_list.empty()) { + table_optimiser.get_segment_optimiser().optimise_segment( + raw_swap_list.front_id().value(), vertices_with_tokens, map_resizing, + raw_swap_list); + } + return; + } + table_optimiser.optimise( + vertices_with_tokens, map_resizing, raw_swap_list, general_optimiser); +} + +static void check_solution( + VertexMapping problem_vertex_mapping, const SwapList& raw_swap_list) { + // Every vertex swap on a source->target mapping converts it to a new + // source->target map, i.e. map[v] = (token currently at v). + // So we BEGIN with every token equalling its target, + // thus at the end every token must equal its vertex. 
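+  // Tiny illustration (hypothetical 2-vertex problem): with the mapping
+  // {0->1, 1->0}, vertex 0 starts holding token 1 and vertex 1 holds token 0;
+  // performing the single swap (0,1) turns the map into {0->0, 1->1}, and
+  // all_tokens_home(...) would then return true.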
+ for (auto id_opt = raw_swap_list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = raw_swap_list.next(id); + const auto& swap = raw_swap_list.at(id); + const VertexSwapResult vswap_result(swap, problem_vertex_mapping); + } + REQUIRE(all_tokens_home(problem_vertex_mapping)); +} + +static size_t get_reduced_swaps_size_with_checks( + const vector& swaps, const VertexMapping& problem_vertex_mapping, + NeighboursFromEdges& neighbours_calculator, + SwapListOptimiser& general_optimiser, + const SwapSequenceReductionTester::Options& options) { + SwapList raw_swap_list; + reduce_sequence( + swaps, problem_vertex_mapping, neighbours_calculator, raw_swap_list, + general_optimiser, options); + check_solution(problem_vertex_mapping, raw_swap_list); + REQUIRE(raw_swap_list.size() <= swaps.size()); + return raw_swap_list.size(); +} + +size_t SwapSequenceReductionTester::get_checked_solution_size( + const DecodedProblemData& problem_data, + const SwapSequenceReductionTester::Options& options) { + NeighboursFromEdges neighbours_calculator(problem_data.swaps); + return get_reduced_swaps_size_with_checks( + problem_data.swaps, problem_data.vertex_mapping, neighbours_calculator, + m_general_optimiser, options); +} + +// Reduces the sequence of swaps, checks it, and returns the size. +size_t SwapSequenceReductionTester::get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data, + const SwapSequenceReductionTester::Options& options) { + NeighboursFromEdges neighbours_calculator(architecture_data.edges); + return get_reduced_swaps_size_with_checks( + problem_data.swaps, problem_data.vertex_mapping, neighbours_calculator, + m_general_optimiser, options); +} + +SequenceReductionStats::SequenceReductionStats() + : problems(0), + reduced_problems(0), + total_original_swaps(0), + total_original_swaps_for_reduced_problems(0), + total_reduced_swaps(0) {} + +void SequenceReductionStats::add_solution( + size_t original_swaps, size_t reduced_swaps) { + REQUIRE(reduced_swaps <= original_swaps); + ++problems; + if (reduced_swaps < original_swaps) { + ++reduced_problems; + total_original_swaps_for_reduced_problems += original_swaps; + } + total_reduced_swaps += reduced_swaps; + total_original_swaps += original_swaps; +} + +std::string SequenceReductionStats::str() const { + std::stringstream ss; + const size_t swaps_for_equal_probs = + total_original_swaps - total_original_swaps_for_reduced_problems; + const size_t reduced_swaps_for_reduced_probs = + total_reduced_swaps - swaps_for_equal_probs; + const size_t overall_decrease = total_original_swaps - total_reduced_swaps; + ss << "[" << problems - reduced_problems << " equal probs (" + << swaps_for_equal_probs << "); " << reduced_problems << " reduced probs (" + << reduced_swaps_for_reduced_probs << " vs " + << total_original_swaps_for_reduced_problems << ")]\n[Overall reduction " + << total_reduced_swaps << " vs " << total_original_swaps << ": "; + if (total_original_swaps == 0) { + ss << "0%"; + } else { + ss << (100 * overall_decrease) / total_original_swaps << "%"; + } + ss << "]"; + return ss.str(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp new file mode 100644 index 0000000000..5ec90aaa46 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp @@ -0,0 +1,57 @@ +#ifndef 
_TKET_TESTS_TokenSwapping_TableLookup_SwapSequenceReductionTester_H_ +#define _TKET_TESTS_TokenSwapping_TableLookup_SwapSequenceReductionTester_H_ + +#include + +#include "../TestUtils/DecodedProblemData.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "TokenSwapping/TableLookup/SwapListTableOptimiser.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Directly test the results of table reductions on fixed swap sequences. */ +class SwapSequenceReductionTester { + public: + struct Options { + bool optimise_initial_segment_only; + }; + + // Reduces the sequence of swaps, checks it, and returns the size. + size_t get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data, const Options& options); + + size_t get_checked_solution_size( + const DecodedProblemData& problem_data, const Options& options); + + private: + SwapListOptimiser m_general_optimiser; + // SwapList m_raw_swap_list; +}; + +struct SequenceReductionStats { + size_t problems; + size_t reduced_problems; + size_t total_original_swaps; + + // This only includes problems where the number of swaps strictly decreased + // after table reduction. + size_t total_original_swaps_for_reduced_problems; + + // This is the sum of "reduced_swaps" passed in, over all problems (including + // those where there was no decrease). + size_t total_reduced_swaps; + + SequenceReductionStats(); + + void add_solution(size_t original_swaps, size_t reduced_swaps); + + std::string str() const; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp new file mode 100644 index 0000000000..abfd296282 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp @@ -0,0 +1,166 @@ +#include +#include +#include +#include + +#include "PermutationTestUtils.hpp" +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/TableLookup/CanonicalRelabelling.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Every element must represent the SAME mapping, up to an appropriate +// relabelling. +typedef vector> + EquivalentMappings; + +// Everything in the OLD mapping does map to the expected vertex. +static void check_that_old_mapping_is_a_subset_of_expected( + const VertexMapping& mapping, + const CanonicalRelabelling::Result& relabelling, + const std::array& end_tokens) { + for (const auto& orig_source_target_pair : mapping) { + const auto& orig_source_v = orig_source_target_pair.first; + const auto& orig_target_v = orig_source_target_pair.second; + if (relabelling.old_to_new_vertices.count(orig_source_v) == 0) { + // If this old vertex is unmentioned, it must be fixed. + REQUIRE(orig_source_v == orig_target_v); + } else { + const auto new_source_v = + relabelling.old_to_new_vertices.at(orig_source_v); + const auto new_target_v = + relabelling.old_to_new_vertices.at(orig_target_v); + // end_tokens is the target->source mapping (the reverse of the usual). + REQUIRE(end_tokens.at(new_target_v) == new_source_v); + } + } +} + +// Everything in the expected new relabelled mapping agrees with the old +// mapping. 
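+// Illustrative example (hypothetical vertex labels): if the old mapping is
+// {100->200, 200->100}, a valid relabelling is old_to_new = {100->0, 200->1},
+// so the new permutation is the transposition (0 1), with permutation_hash 2
+// and end_tokens {1, 0, 2, 3, 4, 5}. Both consistency checks then hold: the
+// old pair 100->200 gives end_tokens[1] == 0, and conversely the non-fixed
+// new pair 0->1 maps back to 100->200, which the old mapping does contain.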
+static void check_that_nonfixed_new_vertices_are_mentioned_in_old_mapping( + const VertexMapping& mapping, + const CanonicalRelabelling::Result& relabelling, + const std::array& end_tokens) { + for (unsigned new_target_v = 0; new_target_v < end_tokens.size(); + ++new_target_v) { + const auto new_source_v = end_tokens[new_target_v]; + if (new_source_v == new_target_v) { + // Is it mentioned in the old mapping? If so, it must be fixed. + if (new_source_v < relabelling.new_to_old_vertices.size()) { + const auto old_source_v = + relabelling.new_to_old_vertices.at(new_source_v); + if (mapping.count(old_source_v) != 0) { + // It IS mentioned, it MUST be fixed. + REQUIRE(mapping.at(old_source_v) == old_source_v); + } + } + continue; + } + // Different source, target, so the original mapping must mention this + // (otherwise, the mapping would be incomplete). + const auto old_source_v = relabelling.new_to_old_vertices.at(new_source_v); + const auto old_target_v = relabelling.new_to_old_vertices.at(new_target_v); + REQUIRE(mapping.at(old_source_v) == old_target_v); + } +} + +static void check_relabelling(const CanonicalRelabelling::Result& relabelling) { + REQUIRE( + relabelling.new_to_old_vertices.size() == + relabelling.old_to_new_vertices.size()); + REQUIRE(relabelling.new_to_old_vertices.size() >= 2); + for (unsigned new_v = 0; new_v < relabelling.new_to_old_vertices.size(); + ++new_v) { + const auto old_v = relabelling.new_to_old_vertices[new_v]; + REQUIRE(relabelling.old_to_new_vertices.at(old_v) == new_v); + } + for (const auto& old_new_pair : relabelling.old_to_new_vertices) { + REQUIRE( + relabelling.new_to_old_vertices.at(old_new_pair.second) == + old_new_pair.first); + } +} + +static void check_that_all_entries_have_the_same_permutation( + unsigned permutation_hash, const EquivalentMappings& list) { + REQUIRE(!list.empty()); + REQUIRE(permutation_hash >= 2); + + // end_tokens[i] tells us the SOURCE vertex of whatever token is now at vertex + // i. + const auto end_tokens = + PermutationTestUtils::get_end_tokens_for_permutation(permutation_hash); + + for (const auto& entry : list) { + const auto& mapping = entry.first; + const auto& relabelling = entry.second; + REQUIRE(relabelling.permutation_hash == permutation_hash); + check_relabelling(relabelling); + check_that_old_mapping_is_a_subset_of_expected( + mapping, relabelling, end_tokens); + check_that_nonfixed_new_vertices_are_mentioned_in_old_mapping( + mapping, relabelling, end_tokens); + } +} + +// Create various random permutations on sets of size <= 6 of arbitrary labels, +// and see that the relabellings work. +SCENARIO("Relabelling test for random mappings") { + const unsigned number_of_vertices = 5; + vector original_labels; + + // The generated mappings, together with the relabelling results. + // The key is the permutation hash. + std::map entries; + RNG rng; + VertexMapping original_map; + CanonicalRelabelling relabeller; + + for (unsigned nn = 0; nn < 200; ++nn) { + original_map.clear(); + for (unsigned ii = 0; ii < number_of_vertices; ++ii) { + original_map[rng.get_size_t(10000)]; + } + original_labels.clear(); + for (const auto& entry : original_map) { + original_labels.push_back(entry.first); + } + rng.do_shuffle(original_labels); + { + size_t ii = 0; + for (auto& entry : original_map) { + entry.second = original_labels[ii]; + ++ii; + } + } + const auto result = relabeller(original_map); + REQUIRE(!result.too_many_vertices); + if (result.identity) { + // Don't store identities. 
+ REQUIRE(all_tokens_home(original_map)); + REQUIRE(result.permutation_hash == 0); + REQUIRE(result.old_to_new_vertices.empty()); + REQUIRE(result.new_to_old_vertices.empty()); + } else { + REQUIRE(result.permutation_hash > 0); + REQUIRE(result.old_to_new_vertices.size() == original_map.size()); + REQUIRE(result.new_to_old_vertices.size() == original_map.size()); + auto& list = entries[result.permutation_hash]; + list.push_back(std::make_pair(original_map, result)); + } + } + + for (const auto& entry : entries) { + check_that_all_entries_have_the_same_permutation(entry.first, entry.second); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp new file mode 100644 index 0000000000..8d679d3a4c --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp @@ -0,0 +1,187 @@ +#include +#include + +#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" +#include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" +#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/TableLookup/ExactMappingLookup.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct ResultChecker { + size_t failed_due_to_too_many_vertices = 0; + size_t failed_due_to_table_missing_entry = 0; + size_t success = 0; + + void check_failed_result( + const ExactMappingLookup::Result& lookup_result, + const VertexMapping& desired_mapping) { + REQUIRE(!lookup_result.success); + if (lookup_result.too_many_vertices) { + CHECK(desired_mapping.size() >= 7); + ++failed_due_to_too_many_vertices; + return; + } + // There WERE enough edges. Why couldn't it find a solution? + // The graph must have been too big. + // The table should cover all 4-vertex mappings + // (at least up to depth 12, and probably all). + CHECK(desired_mapping.size() >= 5); + ++failed_due_to_table_missing_entry; + } + + void check_successful_result( + const ExactMappingLookup::Result& lookup_result, + const vector& sorted_edges_vect, VertexMapping desired_mapping) { + REQUIRE(lookup_result.success); + ++success; + // It succeeded. So, now we have to check it! + CHECK(!lookup_result.too_many_vertices); + + // desired_mapping is a source->target mapping. + // Interpret it to mean that mapping[i] = (current token on vertex i). + // So initially, (token at i) = (target vertex). + // Then, performing the swaps, all tokens should reach their home. + for (const auto& swap : lookup_result.swaps) { + REQUIRE(std::binary_search( + sorted_edges_vect.cbegin(), sorted_edges_vect.cend(), swap)); + std::swap(desired_mapping[swap.first], desired_mapping[swap.second]); + } + CHECK(all_tokens_home(desired_mapping)); + } +}; +} // namespace + +// We know that it succeeded and returned some swaps. +// Call it again with various max number of swaps limits. 
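+// (Concretely: if the unrestricted lookup returned N swaps, the loops below
+// expect the lookup to fail whenever the limit is below N, and to succeed
+// with exactly N swaps whenever the limit is N or more.)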
+static void recalculate_for_successful_problem_with_number_of_swaps_limits( + const VertexMapping& desired_mapping, const vector& edges_vect, + const vector& sorted_edges_vect, unsigned number_of_swaps, + ExactMappingLookup& lookup, ResultChecker& checker) { + for (unsigned max_number_of_swaps = 0; max_number_of_swaps < number_of_swaps; + ++max_number_of_swaps) { + const auto& lookup_result = + lookup(desired_mapping, edges_vect, max_number_of_swaps); + CHECK(!lookup_result.success); + } + for (unsigned max_number_of_swaps = number_of_swaps; + max_number_of_swaps < number_of_swaps + 5; ++max_number_of_swaps) { + const auto& lookup_result = + lookup(desired_mapping, edges_vect, max_number_of_swaps); + CHECK(lookup_result.success); + CHECK(lookup_result.swaps.size() == number_of_swaps); + checker.check_successful_result( + lookup_result, sorted_edges_vect, desired_mapping); + } +} + +// A simple monotonic transformation, avoids contiguous vertices. +static unsigned get_vertex_number(unsigned ii) { return 10 * ii * (ii + 2); } + +SCENARIO("Test exact mapping table lookup for wheel") { + // A star is vertex 0, joined to 1,2,3,...,m. + // A wheel also joins 1,2,...,m to make a cycle. + VertexMapping desired_mapping; + VertexMapping inverse_mapping; + ExactMappingLookup lookup; + + // Maintain an unsorted vector, just in case sorting them makes a difference + // (although it shouldn't). + vector all_edges; + vector all_edges_sorted; + vector vertices_used; + ResultChecker checker; + + for (unsigned number_of_spokes = 3; number_of_spokes <= 6; + ++number_of_spokes) { + vertices_used.clear(); + vertices_used.push_back(0); + all_edges.clear(); + for (unsigned ii = 1; ii <= number_of_spokes; ++ii) { + const auto vv = get_vertex_number(ii); + vertices_used.push_back(vv); + all_edges.push_back(get_swap(0, vv)); + } + // Complete the cycle on 1,2,...,m. + all_edges.push_back(get_swap(vertices_used.back(), vertices_used[1])); + for (unsigned ii = 1; ii < vertices_used.size(); ++ii) { + all_edges.push_back(get_swap(vertices_used[ii - 1], vertices_used[ii])); + } + + all_edges_sorted = all_edges; + std::sort(all_edges_sorted.begin(), all_edges_sorted.end()); + desired_mapping.clear(); + + // Set the SOURCE vertices. + for (auto vv : vertices_used) { + desired_mapping[vv]; + } + for (int perm_counter = 0;;) { + // Set the TARGET vertices. + { + unsigned ii = 0; + for (auto& entry : desired_mapping) { + entry.second = vertices_used[ii]; + ++ii; + } + } + bool succeeded = false; + unsigned number_of_swaps = 0; + + // We have a mapping. Try to look it up. Also, look up the inverse. + inverse_mapping = get_reversed_map(desired_mapping); + { + // Care...because the result is stored internally, + // another call to lookup will invalidate it! + const auto& lookup_result = lookup(desired_mapping, all_edges); + succeeded = lookup_result.success; + if (lookup_result.success) { + checker.check_successful_result( + lookup_result, all_edges_sorted, desired_mapping); + number_of_swaps = lookup_result.swaps.size(); + + const auto& inverse_lookup_result = + lookup(inverse_mapping, all_edges); + CHECK(inverse_lookup_result.success); + + checker.check_successful_result( + inverse_lookup_result, all_edges_sorted, inverse_mapping); + CHECK(number_of_swaps == inverse_lookup_result.swaps.size()); + } else { + // It failed. Why? 
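+        // (In this wheel test the only expected failure mode is having too
+        // many vertices, which can happen once number_of_spokes reaches 6,
+        // i.e. 7 vertices including the hub; the final CHECKs below require
+        // that no failure comes from a missing table entry.)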
+ checker.check_failed_result(lookup_result, desired_mapping); + const auto& inverse_lookup_result = + lookup(inverse_mapping, all_edges); + checker.check_failed_result(inverse_lookup_result, inverse_mapping); + } + } + + if (succeeded) { + recalculate_for_successful_problem_with_number_of_swaps_limits( + desired_mapping, all_edges, all_edges_sorted, number_of_swaps, + lookup, checker); + } + ++perm_counter; + if (perm_counter > 10) { + break; + } + if (!std::next_permutation(vertices_used.begin(), vertices_used.end())) { + break; + } + } + } + + CHECK(checker.failed_due_to_too_many_vertices == 22); + CHECK(checker.failed_due_to_table_missing_entry == 0); + CHECK(checker.success == 231); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp new file mode 100644 index 0000000000..427c31ed7a --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp @@ -0,0 +1,141 @@ +#include +#include +#include + +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/TableLookup/FilteredSwapSequences.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Trivial table lookup tests") { + // Permutation hash 0 is the identity. + for (unsigned edges_bitset = 0; edges_bitset < 50; ++edges_bitset) { + const FilteredSwapSequences::SingleSequenceData identity_result( + 0, edges_bitset, 10); + CHECK(identity_result.edges_bitset == 0); + CHECK(identity_result.swaps_code == 0); + CHECK(identity_result.number_of_swaps == 0); + } + + // (0,1) is the first swap (index 0). So, just need to include that bit. + for (unsigned edges_bitset = 1; edges_bitset < 50; edges_bitset += 2) { + const FilteredSwapSequences::SingleSequenceData single_swap_result( + 2, edges_bitset, 10); + CHECK(single_swap_result.edges_bitset == 0x1); + CHECK(single_swap_result.swaps_code == 0x1); + CHECK(single_swap_result.number_of_swaps == 1); + } + + // Enact a non-identity permutation without edges; impossible! + const vector nontrivial_permutation_hashes{2, 3, 4, 5, 6, + 22, 33, 32, 42, 222}; + for (unsigned perm_hash : nontrivial_permutation_hashes) { + const FilteredSwapSequences::SingleSequenceData impossible_result( + perm_hash, 0x0, 10); + CHECK(impossible_result.edges_bitset == 0); + CHECK(impossible_result.swaps_code == 0); + CHECK( + impossible_result.number_of_swaps == + std::numeric_limits::max()); + } +} + +SCENARIO("Random entries test") { + // Note: the entries are definitely NOT real swap sequence codes, + // they are just random nunmbers. + + const unsigned num_bits = 15; + + std::map + original_entries; + // Make a vector, with duplicates. 
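+  // (Each code is pushed three times below, so duplicates must not be
+  // double-counted; the REQUIRE further down checks that the number of
+  // stored entries equals the number of distinct codes.)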
+ vector codes_vect; + + RNG rng; + + for (unsigned nn = 0; nn < 1000; ++nn) { + const auto num_swaps = rng.get_size_t(1, 6); + SwapConversion::SwapHash code = 0; + SwapConversion::EdgesBitset edges_bitset = 0; + + for (unsigned mm = 0; mm < num_swaps; ++mm) { + const auto new_swap = rng.get_size_t(1, num_bits); + code <<= 4; + code |= new_swap; + edges_bitset |= (1u << (new_swap - 1)); + } + auto& entry = original_entries[code]; + entry.edges_bitset = edges_bitset; + entry.swaps_code = code; + entry.number_of_swaps = num_swaps; + for (int kk = 0; kk < 3; ++kk) { + codes_vect.push_back(code); + } + } + rng.do_shuffle(codes_vect); + + FilteredSwapSequences filtered_sequences; + REQUIRE(filtered_sequences.get_total_number_of_entries() == 0); + filtered_sequences.initialise(codes_vect); + REQUIRE( + filtered_sequences.get_total_number_of_entries() == + original_entries.size()); + + // Now, look up every single edge bitset in turn and check that it finds the + // (joint) fewest number of swaps. + const SwapConversion::EdgesBitset max_bitset = (1u << num_bits) - 1; + for (SwapConversion::EdgesBitset bitset = 0; bitset <= max_bitset; ++bitset) { + // By brute force, find the (joint) fewest number of swaps in a sequence + // using only this bitset. + SwapConversion::SwapHash fewest_swaps_code = + std::numeric_limits::max(); + unsigned number_of_swaps = 10000; + for (const auto& entry : original_entries) { + if (entry.first > fewest_swaps_code) { + break; + } + REQUIRE(entry.second.number_of_swaps <= number_of_swaps); + // Is it a subset? + if ((entry.second.edges_bitset & bitset) != entry.second.edges_bitset) { + continue; + } + // We've found a better entry than what we've got. + number_of_swaps = entry.second.number_of_swaps; + fewest_swaps_code = entry.first; + } + + for (unsigned max_num_swaps = 1; max_num_swaps < num_bits + 3; + ++max_num_swaps) { + const auto result = + filtered_sequences.get_lookup_result(bitset, max_num_swaps); + if (result.number_of_swaps <= max_num_swaps) { + // It found an entry. It must be an existing entry. + const auto& existing_entry = original_entries.at(result.swaps_code); + REQUIRE(result.number_of_swaps == existing_entry.number_of_swaps); + REQUIRE(result.edges_bitset == existing_entry.edges_bitset); + REQUIRE(result.swaps_code == existing_entry.swaps_code); + + // ...and it must be valid... + REQUIRE((result.edges_bitset & bitset) == result.edges_bitset); + REQUIRE(result.number_of_swaps == number_of_swaps); + } else { + // No entry was found. It MUST be because none actually exist, subject + // to the constraints. + REQUIRE(number_of_swaps > max_num_swaps); + // Must be a null result. + REQUIRE(result.edges_bitset == 0); + REQUIRE(result.swaps_code == 0); + REQUIRE(result.number_of_swaps == std::numeric_limits::max()); + } + } + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp new file mode 100644 index 0000000000..bd48493dc1 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp @@ -0,0 +1,152 @@ +#include +#include + +#include "../Data/FixedCompleteSolutions.hpp" +#include "../Data/FixedSwapSequences.hpp" +#include "SwapSequenceReductionTester.hpp" + +; +using std::vector; + +// NOTE: running all tests in this file currently takes ~19 seconds +// on an ordinary Windows laptop. 
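+// How to read the expected messages below (the format comes from
+// SequenceReductionStats::str(), with add_message prefixing the test index and
+// options): for example, "[478 equal probs (17115); 2 reduced probs (25 vs
+// 29)]" means 478 problems were left unchanged at 17115 swaps in total, while
+// 2 problems were strictly reduced, to 25 swaps from an original 29. The
+// second part, "[Overall reduction 17140 vs 17144: 0%]", compares the grand
+// totals (17115 + 25 = 17140 reduced vs 17115 + 29 = 17144 original), with the
+// percentage decrease rounded down by integer division.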
+ +namespace tket { +namespace tsa_internal { +namespace tests { + +static void add_message( + const SequenceReductionStats& stats, const std::string& extra_message, + const SwapSequenceReductionTester::Options& options, + vector& calc_messages) { + std::stringstream ss; + ss << "[n=" << calc_messages.size() << ", " << extra_message + << ": init segm optim? " << std::boolalpha + << options.optimise_initial_segment_only << "]\n" + << stats.str(); + calc_messages.push_back(ss.str()); +} + +static void check_final_messages( + vector& expected_messages, + const vector& calc_messages) { + CHECK(expected_messages.size() == calc_messages.size()); + expected_messages.resize(calc_messages.size()); + for (unsigned ii = 0; ii < calc_messages.size(); ++ii) { + CHECK(expected_messages[ii] == calc_messages[ii]); + } +} + +// Reduce the fixed swap sequences, with edge set implicitly defined +// by the swaps themselves. +SCENARIO("Fixed swap sequences reduction") { + vector expected_messages{ + "[n=0, Full tokens: init segm optim? true]\n" + "[478 equal probs (17115); 2 reduced probs (25 vs 29)]\n" + "[Overall reduction 17140 vs 17144: 0%]", + + "[n=1, Partial tokens: init segm optim? true]\n" + "[880 equal probs (25432); 16 reduced probs (385 vs 407)]\n" + "[Overall reduction 25817 vs 25839: 0%]", + + "[n=2, Full tokens: init segm optim? false]\n" + "[423 equal probs (14323); 57 reduced probs (2693 vs 2821)]\n" + "[Overall reduction 17016 vs 17144: 0%]", + + "[n=3, Partial tokens: init segm optim? false]\n" + "[658 equal probs (12376); 238 reduced probs (12962 vs 13463)]\n" + "[Overall reduction 25338 vs 25839: 1%]"}; + + const FixedSwapSequences fixed_sequences; + SwapSequenceReductionTester tester; + SwapSequenceReductionTester::Options options; + vector calc_messages; + + const auto add_solutions = [&tester, &options]( + const vector& seq_codes, + SequenceReductionStats& stats) { + for (const auto& code_str : seq_codes) { + const DecodedProblemData problem_data(code_str); + const auto reduced_size = + tester.get_checked_solution_size(problem_data, options); + stats.add_solution(problem_data.swaps.size(), reduced_size); + } + }; + + for (int ii = 0; ii < 2; ++ii) { + options.optimise_initial_segment_only = (ii % 2 == 0); + { + SequenceReductionStats full_tokens_stats; + add_solutions(fixed_sequences.full, full_tokens_stats); + add_solutions(fixed_sequences.full_with_errors, full_tokens_stats); + add_message(full_tokens_stats, "Full tokens", options, calc_messages); + } + { + SequenceReductionStats partial_tokens_stats; + add_solutions(fixed_sequences.partial, partial_tokens_stats); + add_solutions(fixed_sequences.partial_with_errors, partial_tokens_stats); + add_message( + partial_tokens_stats, "Partial tokens", options, calc_messages); + } + } + check_final_messages(expected_messages, calc_messages); +} + +// The actual problem input data: the graph may have extra edges +// not present in the returned solution. +SCENARIO("Fixed complete problems") { + vector expected_messages{ + "[n=0, Small: init segm optim? false]\n" + "[249 equal probs (1353); 29 reduced probs (163 vs 204)]\n" + "[Overall reduction 1516 vs 1557: 2%]", + + "[n=1, Medium: init segm optim? false]\n" + "[167 equal probs (2650); 60 reduced probs (1107 vs 1234)]\n" + "[Overall reduction 3757 vs 3884: 3%]", + + "[n=2, Large: init segm optim? 
false]\n" + "[164 equal probs (12771); 408 reduced probs (43946 vs 45894)]\n" + "[Overall reduction 56717 vs 58665: 3%]"}; + + SwapSequenceReductionTester::Options options; + options.optimise_initial_segment_only = false; + + // Separate problems into small, medium, large. + vector stats(3); + + const FixedCompleteSolutions complete_solutions; + SwapSequenceReductionTester tester; + + for (const auto& problem_entry : complete_solutions.solutions) { + // First element encodes the edges. + const DecodedArchitectureData arch_data(problem_entry.second[0]); + for (unsigned ii = 1; ii < problem_entry.second.size(); ++ii) { + const auto& problem_str = problem_entry.second[ii]; + const DecodedProblemData problem_data( + problem_str, DecodedProblemData::RequireContiguousVertices::NO); + + // Small + unsigned stats_index = 0; + if (problem_str.size() > 25) { + // Medium + stats_index = 1; + } + if (problem_str.size() > 60) { + // Large + stats_index = 2; + } + const auto reduced_size = + tester.get_checked_solution_size(problem_data, arch_data, options); + stats[stats_index].add_solution(problem_data.swaps.size(), reduced_size); + } + } + vector calc_messages; + add_message(stats[0], "Small", options, calc_messages); + add_message(stats[1], "Medium", options, calc_messages); + add_message(stats[2], "Large", options, calc_messages); + check_final_messages(expected_messages, calc_messages); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp new file mode 100644 index 0000000000..296a0b5ed0 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp @@ -0,0 +1,165 @@ +#include +#include +#include + +#include "PermutationTestUtils.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "TokenSwapping/TableLookup/SwapConversion.hpp" +#include "TokenSwapping/TableLookup/SwapSequenceTable.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Extra redundant data in the table slows it down, +// but does not affect the returned results. +// But the stored swap sequences are used directly without further checks +// or optimisations, so they should be as close to optimal as possible. +static void test_irreducibility_of_codes( + unsigned permutation_hash, const vector& codes, + SwapListOptimiser& optimiser, SwapList& swap_list) { + for (auto& code : codes) { + swap_list.fast_clear(); + auto swap_sequence_hash_copy = code; + while (swap_sequence_hash_copy != 0) { + const Swap& swap = + SwapConversion::get_swap_from_hash(swap_sequence_hash_copy & 0xF); + swap_list.push_back(swap); + swap_sequence_hash_copy >>= 4; + } + const auto initial_number_of_swaps = swap_list.size(); + + // We don't yet have good theoretical results about order of passes, + // so just try all of them. + optimiser.optimise_pass_with_zero_travel(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + optimiser.optimise_pass_with_token_tracking(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + + // This may reorder the swaps, without reducing. + optimiser.optimise_pass_with_frontward_travel(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + + // We'd LIKE to have a theorem assuring us that this pass isn't necessary + // after the previous passes, but currently we don't. 
+ optimiser.optimise_pass_with_token_tracking(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + optimiser.optimise_pass_with_zero_travel(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + } +} + +// All the swap sequences encoded in the vector should enact +// the given permutation. +static void test_correctness_of_codes( + unsigned permutation_hash, const vector& codes) { + REQUIRE(codes.size() >= 2); + + // Reconstruct the desired permutation from the hash. + const auto expected_tokens = + PermutationTestUtils::get_end_tokens_for_permutation(permutation_hash); + + // Element i is the token at vertex i. + // We start with tokens 0,1,2,...,5 on vertices 0,1,2,...,5, + // then perform the swaps. + std::array tokens; + for (const auto& code : codes) { + std::iota(tokens.begin(), tokens.end(), 0); + auto swap_sequence_hash_copy = code; + unsigned number_of_swaps = 0; + while (swap_sequence_hash_copy != 0) { + const Swap& swap = + SwapConversion::get_swap_from_hash(swap_sequence_hash_copy & 0xF); + swap_sequence_hash_copy >>= 4; + std::swap(tokens[swap.first], tokens[swap.second]); + ++number_of_swaps; + } + REQUIRE(number_of_swaps >= 1); + + // Actually, 16 is the maximum. + CHECK(number_of_swaps <= 12); + REQUIRE(tokens == expected_tokens); + } +} + +// The swap sequences encoded in the vector should not have +// any redundancies: if sequences S1, S2 have edge bitsets E1, E2 +// (i.e., E(j) is the set of swaps used in S(j)), AND give the same permutation, +// then E1 != E2. (No point in having both). +// Also, if E1 is a subset of E2, then length(S2) < length(S1). +// (Otherwise, S2 would be a pointless entry: whenever S2 is possible, +// S1 is also possible, with an equal or smaller number of swaps). +static void test_redundancies( + unsigned permutation_hash, const vector& codes) { + vector edge_bitsets; + edge_bitsets.reserve(codes.size()); + for (const auto& code : codes) { + edge_bitsets.push_back(SwapConversion::get_edges_bitset(code)); + } + // Crude quadratic algorithm to check which codes are redundant. + // Don't rely on sorted codes. + for (unsigned ii = 0; ii < codes.size(); ++ii) { + for (unsigned jj = 0; jj < codes.size(); ++jj) { + if (ii == jj) { + continue; + } + const auto intersection = edge_bitsets[ii] & edge_bitsets[jj]; + const bool e1_subset_of_e2 = (intersection == edge_bitsets[ii]); + const auto num_swaps1 = SwapConversion::get_number_of_swaps(codes[ii]); + const auto num_swaps2 = SwapConversion::get_number_of_swaps(codes[jj]); + + if (e1_subset_of_e2 && num_swaps1 <= num_swaps2) { + INFO( + "For perm.hash " + << permutation_hash << ", Code 1: 0x" << std::hex << codes[ii] + << " only uses swaps from code 2: 0x" << codes[jj] + << ", and uses the same or fewer swaps (" << std::dec << num_swaps1 + << " vs " << num_swaps2 + << "). Thus code 2 is pointless and could be removed."); + CHECK(false); + } + } + } +} + +// Checks that all entries returned by the table do actually +// give the required permutation of vertices. +SCENARIO("Fixed table entries test") { + const auto table = SwapSequenceTable::get_table(); + // const auto table = get_new_table(); + SwapListOptimiser optimiser; + SwapList swap_list; + unsigned total_entries = 0; + for (const auto& entry : table) { + REQUIRE(entry.first >= 2); + test_correctness_of_codes(entry.first, entry.second); + test_irreducibility_of_codes( + entry.first, entry.second, optimiser, swap_list); + test_redundancies(entry.first, entry.second); + + // No duplication. 
Not necessary, but a good test. + CHECK(std::is_sorted(entry.second.cbegin(), entry.second.cend())); + CHECK( + std::adjacent_find(entry.second.cbegin(), entry.second.cend()) == + entry.second.cend()); + + // NOTE: we should really also test that inverse mappings are not stored in + // the table. This was previously true, but a negligibly small number of + // entries have crept in. They're a bit fiddly to track down and remove, so + // forget about them for now. (Confusion: within each permutation hash, e.g. + // 32 corresponding to (012)(34)(5), the INVERSE mapping is (021)(34)(5). + // This will have the same permutation hash, but of course vertices must be + // RELABELLED. To find the inverse entry in the table, we cannot JUST + // reverse the swaps, we also need to relabel them. + /// TODO: test for, track down and remove redundant inverse entries. + total_entries += entry.second.size(); + } + CHECK(total_entries == 7939); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp new file mode 100644 index 0000000000..e4a0837823 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -0,0 +1,153 @@ +#include "BestTsaTester.hpp" + +#include + +#include "TokenSwapping/ArchitectureMapping.hpp" +#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { + +// We are going to treat the raw data in FixedSwapSequences etc. as +// the "correct" data, which we don't want to relabel or process further. +// +// But when an Architecture object is created with a vector of edges, +// given by pairs ("raw" vertices), +// vertex relabelling takes place. +// Thus we need an extra layer of conversion to get back what we want. +struct VertexRelabellingManager { + std::map raw_to_internal_map; + // The internal indices are, of course, 0,1,2,...,N for some N, + // and therefore we can use a vector instead of a map. + vector internal_to_raw_map; + + // The exact same edges that were used to construct the Architecture object + // (in the same order!) must be passed in. + explicit VertexRelabellingManager( + const vector>& raw_edges) { + for (auto edge : raw_edges) { + size_t next_index = raw_to_internal_map.size(); + if (raw_to_internal_map.count(edge.first) == 0) { + raw_to_internal_map[edge.first] = next_index; + } + next_index = raw_to_internal_map.size(); + if (raw_to_internal_map.count(edge.second) == 0) { + raw_to_internal_map[edge.second] = next_index; + } + } + internal_to_raw_map.resize(raw_to_internal_map.size()); + for (const auto& entry : raw_to_internal_map) { + internal_to_raw_map[entry.second] = entry.first; + } + } + Swap get_raw_swap(Swap internal_swap) const { + return get_swap( + internal_to_raw_map.at(internal_swap.first), + internal_to_raw_map.at(internal_swap.second)); + } + + // To be used as input to the TSA. + // Gives the source->target mappings for INTERNAL vertices. 
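+  // Hypothetical example: if the manager is built from raw edges (111,222) and
+  // (333,222), the internal labels are 111 -> 0, 222 -> 1, 333 -> 2 (in order of
+  // first appearance), so a raw mapping {111 -> 333} is passed to the TSA
+  // as {0 -> 2}.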
+ VertexMapping get_internal_mapping_for_tsa_input( + const VertexMapping& raw_mapping) const { + VertexMapping mapping; + for (const auto& entry : raw_mapping) { + mapping[raw_to_internal_map.at(entry.first)] = + raw_to_internal_map.at(entry.second); + } + return mapping; + } +}; +} // namespace + +BestFullTsa& BestTsaTester::get_best_full_tsa() { return m_best_full_tsa; } + +size_t BestTsaTester::get_checked_solution_size( + const DecodedProblemData& problem_data) { + m_architecture_work_data.edges.clear(); + for (const auto& swap : problem_data.swaps) { + m_architecture_work_data.edges.insert(swap); + } + m_architecture_work_data.number_of_vertices = 0; + return get_checked_solution_size(problem_data, m_architecture_work_data); +} + +size_t BestTsaTester::get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data) { + CHECK(problem_data.number_of_vertices >= 4); + if (architecture_data.number_of_vertices > 0) { + CHECK( + architecture_data.number_of_vertices >= + problem_data.number_of_vertices); + } + // problem_data.number_of_vertices only includes the vertices mentioned in the + // solution swaps. + // architecture_data.number_of_vertices is EITHER set to zero, + // OR is calculated from the EDGES in the architecture, and hence is correct. + const auto number_of_vertices = std::max( + architecture_data.number_of_vertices, problem_data.number_of_vertices); + + check_mapping(problem_data.vertex_mapping); + for (const auto& swap : problem_data.swaps) { + REQUIRE(architecture_data.edges.count(swap) != 0); + } + for (const auto& edge : architecture_data.edges) { + REQUIRE(edge.first < number_of_vertices); + REQUIRE(edge.second < number_of_vertices); + } + m_edges_vect = vector>{ + architecture_data.edges.cbegin(), architecture_data.edges.cend()}; + + REQUIRE(problem_data.vertex_mapping.size() >= 1); + REQUIRE(problem_data.vertex_mapping.size() <= number_of_vertices); + REQUIRE(problem_data.vertex_mapping.crbegin()->first < number_of_vertices); + + const bool full_tokens = + problem_data.vertex_mapping.size() == number_of_vertices; + + const Architecture arch(m_edges_vect); + const ArchitectureMapping arch_mapping(arch); + const VertexRelabellingManager relabelling_manager(m_edges_vect); + m_raw_swap_list.clear(); + m_vertex_mapping_copy = + relabelling_manager.get_internal_mapping_for_tsa_input( + problem_data.vertex_mapping); + m_best_full_tsa.append_partial_solution( + m_raw_swap_list, m_vertex_mapping_copy, arch_mapping); + + // Now check the calculated solution. + // Set it back to the raw, i.e. "proper" mapping. + m_vertex_mapping_copy = problem_data.vertex_mapping; + + for (auto id_opt = m_raw_swap_list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = m_raw_swap_list.next(id); + auto& swap = m_raw_swap_list.at(id); + // This is an "internal" swap, so needs conversion back to "raw". + swap = relabelling_manager.get_raw_swap(swap); + + const VertexSwapResult vswap_result(swap, m_vertex_mapping_copy); + if (full_tokens) { + REQUIRE(vswap_result.tokens_moved == 2); + } else { + // We require our best TSA to avoid empty swaps. 
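+      // (A swap which moves no tokens could simply be deleted from the solution
+      // without affecting correctness, so it would only add needless cost.)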
+ REQUIRE(vswap_result.tokens_moved >= 1); + REQUIRE(vswap_result.tokens_moved <= 2); + } + REQUIRE(architecture_data.edges.count(swap) != 0); + } + REQUIRE(all_tokens_home(m_vertex_mapping_copy)); + return m_raw_swap_list.size(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp new file mode 100644 index 0000000000..1dad2b38aa --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp @@ -0,0 +1,54 @@ +#ifndef _TKET_TESTS_TokenSwapping_TestUtils_BestTsaTester_H_ +#define _TKET_TESTS_TokenSwapping_TestUtils_BestTsaTester_H_ + +#include "DecodedProblemData.hpp" +#include "TokenSwapping/BestFullTsa.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Solves a fixed problem using the current best TSA. */ +class BestTsaTester { + public: + /** Computes a solution to the problem using our best TSA, + * checks it, and returns how many swaps it needed. + * The edges of the graph are directly taken from the list of swaps in the + * reference solution. + * @param data The problem data which was decoded from a string. + * @return The number of swaps returned by our TSA. The calculated swaps are + * also checked for correctness. + */ + size_t get_checked_solution_size(const DecodedProblemData& data); + + /** For problems where the architecture is NOT simply given implicitly + * by the swap sequence, so we must also pass in the complete set + * of edges, some of which might not appear in the final swaps. + * @param problem_data The data about a specific problem (calculated swaps, + * etc.) + * @param architecture_data Data about the architecture for the problem which + * is NOT deduced implicitly from the problem data itself (i.e., the edges). + * @return The number of swaps returned by our TSA. The calculated swaps are + * also checked for correctness. + */ + size_t get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data); + + /** For convenience in testing/experiments, allow access to the TSA, + * to change parameters etc. etc. from their defaults. + */ + BestFullTsa& get_best_full_tsa(); + + private: + BestFullTsa m_best_full_tsa; + SwapList m_raw_swap_list; + DecodedArchitectureData m_architecture_work_data; + std::vector> m_edges_vect; + VertexMapping m_vertex_mapping_copy; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp new file mode 100644 index 0000000000..51fc774618 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp @@ -0,0 +1,167 @@ +#include "DecodedProblemData.hpp" + +#include + +#include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" +#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +/// TODO: move somewhere more appropriate. +// initially, "vm" has keys equal to the vertices with tokens; the values are +// ignored. Change to the desired source->target mapping, as used in all problem +// solving, induced by the swaps. Return the number of empty swaps. 
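+// Hypothetical example: if "vm" initially has keys {0, 2} and the swaps are
+// (0,1) then (1,2), the map after performing the swaps is {1 -> 2, 2 -> 0}
+// (end vertex -> original vertex), so the reversed map {0 -> 2, 2 -> 1} is the
+// desired source -> target problem mapping, and no empty swaps occurred.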
+static unsigned get_problem_mapping( + VertexMapping& vm, const vector& swaps) { + const auto init_num_tokens = vm.size(); + for (auto& entry : vm) { + entry.second = entry.first; + } + unsigned empty_swaps = 0; + for (auto swap : swaps) { + const VertexSwapResult result(swap, vm); + if (result.tokens_moved == 0) { + ++empty_swaps; + } + } + // Each time we had v1->t1, v2->t2 and we swapped v1,v2, we then got v1->t2, + // v2->t1. Thus, the KEY is a vertex, the VALUE is the token currently on that + // vertex. So, the VALUES are the tokens, which are the vertex it originally + // came from, i.e., it's end vertex -> original vertex. So our desired problem + // mapping source -> target is the REVERSE!! + vm = get_reversed_map(vm); + REQUIRE(init_num_tokens == vm.size()); + check_mapping(vm); + return empty_swaps; +} + +static const std::string& encoding_chars() { + static const std::string chars{ + "0123456789abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ"}; + return chars; +} + +static std::map get_char_to_vertex_map_local() { + std::map char_to_vertex_map; + const auto& chars = encoding_chars(); + for (size_t ii = 0; ii < chars.size(); ++ii) { + char_to_vertex_map[chars[ii]] = ii; + } + return char_to_vertex_map; +} + +static const std::map& char_to_vertex_map() { + static const std::map map( + get_char_to_vertex_map_local()); + return map; +} + +DecodedProblemData::DecodedProblemData( + const std::string& str, + RequireContiguousVertices require_contiguous_vertices) { + if (str.empty()) { + return; + } + + unsigned index = 0; + bool separator_found = false; + while (index < str.size()) { + if (str.at(index) == '_') { + ++index; + separator_found = true; + break; + } + const auto v1 = char_to_vertex_map().at(str.at(index)); + const auto v2 = char_to_vertex_map().at(str.at(index + 1)); + swaps.emplace_back(get_swap(v1, v2)); + index += 2; + } + + std::set vertices; + for (auto swap : swaps) { + vertices.insert(swap.first); + vertices.insert(swap.second); + } + CHECK(vertices.size() >= 4); + number_of_vertices = vertices.size(); + if (require_contiguous_vertices == RequireContiguousVertices::YES) { + REQUIRE(*vertices.crbegin() + 1 == vertices.size()); + } + + // Now set up the vertex mapping. Initially, all vertices with tokens + // have a token value equal to the vertex number. + vertex_mapping.clear(); + if (separator_found) { + unsigned num_tokens = 0; + for (; index < str.size(); ++index) { + const auto vv = char_to_vertex_map().at(str.at(index)); + if (require_contiguous_vertices == RequireContiguousVertices::YES) { + // It came from a swap sequence. Therefore, there are no extra edges, + // so every vertex must exist on a USED edge. + REQUIRE(vertices.count(vv) != 0); + } + vertex_mapping[vv]; + ++num_tokens; + } + REQUIRE(num_tokens == vertex_mapping.size()); + } else { + REQUIRE(index == str.size()); + for (auto vv : vertices) { + vertex_mapping[vv]; + } + } + // NOW, perform the swaps. + get_problem_mapping(vertex_mapping, swaps); +} + +DecodedArchitectureData::DecodedArchitectureData() : number_of_vertices(0) {} + +DecodedArchitectureData::DecodedArchitectureData( + const std::string& solution_edges_string) { + vector> neighbours(1); + std::set vertices_seen; + for (unsigned char ch : solution_edges_string) { + if (ch != ':') { + const auto new_v = char_to_vertex_map().at(ch); + neighbours.back().push_back(new_v); + vertices_seen.insert(new_v); + continue; + } + // We move onto the next vertex. 
+ neighbours.emplace_back(); + } + // The last vertex N cannot have any neighbours j with j>N, + // so we don't bother to record it in the string, + // so it's not stored in "neighbours". + number_of_vertices = neighbours.size() + 1; + CHECK(number_of_vertices >= 4); + // But everything MUST be joined to something, if the graph is connected. + // Vertex v won't be listed if it only joins higher-numbered vertices, + // so many vertices might not be mentioned here. + REQUIRE(!vertices_seen.empty()); + REQUIRE(*vertices_seen.crbegin() <= neighbours.size()); + + for (size_t ii = 0; ii < neighbours.size(); ++ii) { + if (neighbours[ii].empty()) { + continue; + } + REQUIRE(std::is_sorted(neighbours[ii].cbegin(), neighbours[ii].cend())); + REQUIRE(neighbours[ii][0] > ii); + REQUIRE( + std::adjacent_find(neighbours[ii].cbegin(), neighbours[ii].cend()) == + neighbours[ii].cend()); + for (auto jj : neighbours[ii]) { + edges.insert(get_swap(ii, jj)); + } + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp new file mode 100644 index 0000000000..dcb97ab62b --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp @@ -0,0 +1,64 @@ +#ifndef _TKET_TESTS_TokenSwapping_TestUtils_DecodedProblemData_H_ +#define _TKET_TESTS_TokenSwapping_TestUtils_DecodedProblemData_H_ + +#include +#include + +#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** For converting a raw string, representing a fixed sequence of swaps, + * into a problem for a TSA. */ +struct DecodedProblemData { + /** This is, of course, a possible SOLUTION to the problem, not part + * of the problem input data itself. Since we know at least one solution + * (this one), we can compare it with our returned solution + * to see how good it is. + */ + std::vector swaps; + + /** The desired source->target mapping for a problem. */ + VertexMapping vertex_mapping; + + size_t number_of_vertices; + + /** Do we require the vertex numbers to be {0,1,2,...,m}, with no gaps? */ + enum class RequireContiguousVertices { YES, NO }; + + explicit DecodedProblemData( + const std::string& str, + RequireContiguousVertices require_contiguous_vertices = + RequireContiguousVertices::YES); +}; + +/** For decoding strings like "1e:2d:3c:4b:5a:69:8:8:9:a:b:c:d:e" + * as seen in FixedCompleteSolutions, which encode + * the neighbours of vertices 0,1,2,...,N. + * Only edges(i,j) with i edges; + + /** The vertex numbers are contiguous, i.e. 0,1,2,...N for some N. */ + size_t number_of_vertices; + + /** Simply without filling any data. */ + DecodedArchitectureData(); + + /** Decodes and fills the data upon construction. + * @param solution_edges_string A string which encodes the edges of an + * architecture. 
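+   * For example (a hypothetical input), "123:23:3" would decode to the complete
+   * graph on vertices {0,1,2,3}: vertex 0 is joined to 1,2,3; vertex 1 to 2,3;
+   * vertex 2 to 3 (only neighbours with a higher number are listed).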
+ */ + explicit DecodedArchitectureData(const std::string& solution_edges_string); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp new file mode 100644 index 0000000000..e0b8a60c17 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -0,0 +1,220 @@ +#include "FullTsaTesting.hpp" + +#include + +#include "TokenSwapping/ArchitectureMapping.hpp" +#include "TokenSwapping/DistancesFromArchitecture.hpp" +#include "TokenSwapping/NeighboursFromArchitecture.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" +#include "TokenSwapping/TSAUtils/DistanceFunctions.hpp" +#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +void FullTsaTesting::check_solution( + size_t counts_list_index, VertexMapping vertex_mapping, size_t lower_bound, + AllowEmptySwaps allow_empty_swaps) { + bool empty_swap_occurred = false; + REQUIRE(m_swap_list.size() >= lower_bound); + for (auto swap : m_swap_list.to_vector()) { + const VertexSwapResult swap_res(swap, vertex_mapping); + if (swap_res.tokens_moved == 0) { + empty_swap_occurred = true; + } + } + if (empty_swap_occurred && allow_empty_swaps == AllowEmptySwaps::NO) { + INFO( + "index=" << counts_list_index << ", " << vertex_mapping.size() + << " toks; lb=" << lower_bound << "; " << m_swap_list.size() + << " swaps"); + CHECK(false); + } + REQUIRE(all_tokens_home(vertex_mapping)); + auto& swaps = m_counts_list[counts_list_index].sorted_swaps; + swaps = m_swap_list.to_vector(); + std::sort(swaps.begin(), swaps.end()); +} + +void FullTsaTesting::check_equivalent_good_solution( + size_t existing_index, VertexMapping vertex_mapping, + AllowEmptySwaps allow_empty_swaps) { + check_solution( + m_counts_list.size() - 1, vertex_mapping, 0, allow_empty_swaps); + INFO("existing_index=" << existing_index); + CHECK( + m_counts_list[existing_index].sorted_swaps == + m_counts_list.back().sorted_swaps); +} + +void FullTsaTesting::test_order(size_t index1, size_t index2) const { + INFO("i1=" << index1 << ", i2=" << index2); + CHECK( + m_counts_list[index1].sorted_swaps.size() <= + m_counts_list[index2].sorted_swaps.size()); +} + +void FullTsaTesting::complete_counts_list_for_single_problem() { + size_t smallest_number = m_counts_list[0].sorted_swaps.size(); + + // Ignore the last index, which is a dummy. + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + auto& counts = m_counts_list[index]; + counts.total_swaps += counts.sorted_swaps.size(); + smallest_number = std::min(smallest_number, counts.sorted_swaps.size()); + } + // Now, we've got the (joint) winner. 
+ size_t best_index = m_counts_list.size(); + size_t num_winners = 0; + + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + auto& counts = m_counts_list[index]; + REQUIRE(counts.sorted_swaps.size() >= smallest_number); + if (counts.sorted_swaps.size() == smallest_number) { + ++counts.problems_where_this_was_the_joint_winner; + ++num_winners; + best_index = index; + } + } + REQUIRE(num_winners >= 1); + if (num_winners == 1) { + ++m_counts_list[best_index].problems_where_this_was_the_clear_winner; + } +} + +FullTsaTesting::FullTsaTesting() { + m_counts_list.resize(7); + for (auto& entry : m_counts_list) { + entry.total_swaps = 0; + } +} + +void FullTsaTesting::add_problems( + const Architecture& arch, const vector& problems, + const std::string& new_name, RNG& rng, PartialTsaInterface& full_tsa) { + m_number_of_problems += problems.size(); + const std::string name_for_this = new_name + ":" + full_tsa.name(); + if (m_name.empty()) { + m_name = name_for_this; + } else { + if (m_name != name_for_this) { + m_name = m_name + ":" + name_for_this; + } + } + const ArchitectureMapping arch_mapping(arch); + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RiverFlowPathFinder path_finder(distances, neighbours, rng); + vector raw_calc_swaps; + VertexMapping problem_copy_to_destroy; + + for (size_t prob_index = 0; prob_index < problems.size(); ++prob_index) { + const auto& problem = problems[prob_index]; + const auto lower_bound = get_swaps_lower_bound(problem, distances); + m_number_of_tokens += problem.size(); + m_total_lower_bounds += lower_bound; + problem_copy_to_destroy = problem; + m_swap_list.clear(); + rng.set_seed(); + full_tsa.append_partial_solution( + m_swap_list, problem_copy_to_destroy, distances, neighbours, + path_finder); + raw_calc_swaps = m_swap_list.to_vector(); + + // Now, let's check the calculated swaps. + check_solution(0, problem, lower_bound, AllowEmptySwaps::NO); + + // Minimal travel optimising + m_optimiser.optimise_pass_with_zero_travel(m_swap_list); + check_solution(1, problem, lower_bound, AllowEmptySwaps::NO); + test_order(1, 0); + + //...add artificial token tracking...(remembering that empty swaps + // can be introduced, since it knows nothing about our tokens). + m_optimiser.optimise_pass_with_token_tracking(m_swap_list); + check_solution(2, problem, lower_bound, AllowEmptySwaps::YES); + test_order(2, 1); + + m_optimiser.optimise_pass_remove_empty_swaps(m_swap_list, problem); + check_solution(3, problem, lower_bound, AllowEmptySwaps::NO); + test_order(3, 2); + + m_optimiser.full_optimise(m_swap_list, problem); + check_solution(4, problem, lower_bound, AllowEmptySwaps::NO); + test_order(4, 3); + + // Now, test various equalities. + + // The token tracking pass, by itself, is the same whether or not + // we zero travel optimise first (which just makes things faster, + // not better). + m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.optimise_pass_with_token_tracking(m_swap_list); + m_optimiser.optimise_pass_with_frontward_travel(m_swap_list); + // Is 5 the same as 2? No! Usually the same, but NOT always; + // e.g. a test with random trees found a small difference. + check_solution(5, problem, lower_bound, AllowEmptySwaps::YES); + + // Swap travels permute the swaps, but otherwise reduce them + // no more than zero travel. 
+ m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.optimise_pass_with_frontward_travel(m_swap_list); + check_equivalent_good_solution(1, problem, AllowEmptySwaps::NO); + + // full optimise is no better when combined + // with other passes. + m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.full_optimise(m_swap_list); + check_equivalent_good_solution(2, problem, AllowEmptySwaps::YES); + m_optimiser.optimise_pass_with_token_tracking(m_swap_list); + check_equivalent_good_solution(2, problem, AllowEmptySwaps::YES); + + m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.full_optimise(m_swap_list, problem); + check_equivalent_good_solution(4, problem, AllowEmptySwaps::NO); + + complete_counts_list_for_single_problem(); + } +} + +std::string FullTsaTesting::str() const { + std::stringstream ss; + ss << "[" << m_name << ": " << m_number_of_problems << " probs; " + << m_number_of_tokens << " toks; " << m_total_lower_bounds + << " tot.lb]\n[Total swaps:"; + // The last entry is a "dummy". + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + ss << " " << m_counts_list[index].total_swaps; + } + ss << "]\n[Winners: joint:"; + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + ss << " " << m_counts_list[index].problems_where_this_was_the_joint_winner; + } + ss << " undisputed:"; + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + ss << " " << m_counts_list[index].problems_where_this_was_the_clear_winner; + } + ss << "]"; + return ss.str(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp new file mode 100644 index 0000000000..29c17474fb --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -0,0 +1,74 @@ +#ifndef _TKET_TESTS_TokenSwapping_TestUtils_FullTsaTesting_H_ +#define _TKET_TESTS_TokenSwapping_TestUtils_FullTsaTesting_H_ + +#include "Architecture/Architectures.hpp" +#include "TokenSwapping/PartialTsaInterface.hpp" +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Only for testing FULL TSAs, which guarantee to find a solution. +class FullTsaTesting { + public: + FullTsaTesting(); + + /// Will use the RiverFlowPathFinder + /// (which needs an RNG). + void add_problems( + const Architecture& arch, const std::vector& problems, + const std::string& name, RNG& rng, PartialTsaInterface& full_tsa); + + /// A summary of the statistics. + std::string str() const; + + private: + // For various optimisation passes, we check how well they did, + // and we record when a particular one beats + struct Counts { + size_t total_swaps = 0; + size_t problems_where_this_was_the_joint_winner = 0; + size_t problems_where_this_was_the_clear_winner = 0; + + // Reset this with each new calculated solution; this checks whether + // newly calculated solutions really are just a permutation of an existing + // solution. 
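+    // (Kept sorted, so that two solutions differing only in the order of their
+    // swaps compare as equal.)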
+ std::vector sorted_swaps; + }; + + size_t m_total_lower_bounds = 0; + size_t m_number_of_problems = 0; + size_t m_number_of_tokens = 0; + SwapList m_swap_list; + SwapListOptimiser m_optimiser; + std::vector m_counts_list; + std::string m_name; + std::string m_prev_tsa_name; + + enum class AllowEmptySwaps { YES, NO }; + + // Check that the swaps currently stored in m_swap_list are correct, + // and store the data in m_counts_list (if the index is not too big). + void check_solution( + size_t counts_list_index, VertexMapping vertex_mapping, + size_t lower_bound, AllowEmptySwaps allow_empty_swaps); + + // Check that the swaps currently stored in m_swap_list are correct. + // Check also that they are a reordering of those + // already calculated and stored in m_counts_list, at the given index. + void check_equivalent_good_solution( + size_t existing_index, VertexMapping vertex_mapping, + AllowEmptySwaps allow_empty_swaps); + + // In m_counts_list, the swaps for i1 should be <= the swaps for i2. + void test_order(size_t index1, size_t index2) const; + + void complete_counts_list_for_single_problem(); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp new file mode 100644 index 0000000000..d826768fb2 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -0,0 +1,138 @@ +#include "PartialTsaTesting.hpp" + +#include + +#include "TestStatsStructs.hpp" +#include "TokenSwapping/ArchitectureMapping.hpp" +#include "TokenSwapping/DistancesFromArchitecture.hpp" +#include "TokenSwapping/NeighboursFromArchitecture.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/TSAUtils/DistanceFunctions.hpp" +#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Also checks if an empty token pair swap occurs. 
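+// ("L" here is the total home distance of the tokens: as a hypothetical example,
+// if one token is 3 edges away from its target and another is 1 edge away, then
+// L = 4; L == 0 means every token is home.)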
+static size_t get_recalculated_final_L( + VertexMapping problem, const SwapList& swap_list, + DistancesInterface& distances, TokenOption token_option) { + bool empty_tok_swap = false; + + for (auto id_opt = swap_list.front_id(); id_opt; + id_opt = swap_list.next(id_opt)) { + const auto swap = swap_list.at(id_opt.value()); + const VertexSwapResult swap_result(swap, problem); + if (swap_result.tokens_moved == 0 && + token_option == TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP) { + empty_tok_swap = true; + } + } + if (empty_tok_swap) { + REQUIRE(false); + } + return get_total_home_distances(problem, distances); +} + +static void check_progress( + size_t init_L, size_t final_L, RequiredTsaProgress progress) { + REQUIRE(final_L <= init_L); + switch (progress) { + case RequiredTsaProgress::FULL: + REQUIRE(final_L == 0); + return; + case RequiredTsaProgress::NONZERO: + if (init_L > 0) { + REQUIRE(final_L < init_L); + } + // Fall through + case RequiredTsaProgress::NONE: + return; + default: + REQUIRE(false); + } +} + +static std::string run_tests( + const std::vector& problems, DistancesInterface& distances, + NeighboursInterface& neighbours, PathFinderInterface& path_finder, + PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + TokenOption token_option) { + REQUIRE(!problems.empty()); + PartialTsaStatistics statistics; + SwapList swap_list; + + for (const auto& problem : problems) { + const auto init_L = get_total_home_distances(problem, distances); + swap_list.clear(); + + // Will be destructively altered + auto problem_copy = problem; + path_finder.reset(); + partial_tsa.append_partial_solution( + swap_list, problem_copy, distances, neighbours, path_finder); + + const auto final_L = get_total_home_distances(problem_copy, distances); + check_progress(init_L, final_L, progress); + + REQUIRE( + get_recalculated_final_L(problem, swap_list, distances, token_option) == + final_L); + + statistics.add_problem_result( + init_L, final_L, problem.size(), swap_list.size()); + } + std::stringstream ss; + ss << "[TSA=" << partial_tsa.name(); + switch (progress) { + case RequiredTsaProgress::FULL: + ss << " FULL"; + break; + + case RequiredTsaProgress::NONZERO: + ss << " NONZERO"; + break; + + // Fall through + case RequiredTsaProgress::NONE: + default: + break; + } + ss << " PF=" << path_finder.name() << "\n" + << statistics.str(problems.size()) << "]"; + return ss.str(); +} + +std::string run_tests( + const Architecture& arch, const std::vector& problems, + PathFinderInterface& path_finder, PartialTsaInterface& partial_tsa, + RequiredTsaProgress progress, TokenOption token_option) { + const ArchitectureMapping arch_mapping(arch); + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + return run_tests( + problems, distances, neighbours, path_finder, partial_tsa, progress, + token_option); +} + +std::string run_tests( + const Architecture& arch, const std::vector& problems, + RNG& rng, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + TokenOption token_option) { + const ArchitectureMapping arch_mapping(arch); + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RiverFlowPathFinder path_finder(distances, neighbours, rng); + + return run_tests( + problems, distances, neighbours, path_finder, partial_tsa, progress, + token_option); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git 
a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp new file mode 100644 index 0000000000..f8605aa209 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -0,0 +1,35 @@ +#ifndef _TKET_TESTS_TokenSwapping_TestUtils_PartialTsaTesting_H_ +#define _TKET_TESTS_TokenSwapping_TestUtils_PartialTsaTesting_H_ + +#include "Architecture/Architectures.hpp" +#include "TokenSwapping/PartialTsaInterface.hpp" +#include "TokenSwapping/RNG.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +enum class RequiredTsaProgress { NONE, FULL, NONZERO }; +enum class TokenOption { + ALLOW_EMPTY_TOKEN_SWAP, + DO_NOT_ALLOW_EMPTY_TOKEN_SWAP +}; + +/// Returns a summary string of the results, as well as doing the checks. +std::string run_tests( + const Architecture& arch, const std::vector& problems, + PathFinderInterface& path_finder, PartialTsaInterface& partial_tsa, + RequiredTsaProgress progress, + TokenOption token_option = TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP); + +/// If no path finder is specified, will use the RiverFlowPathFinder +/// (which needs an RNG). +std::string run_tests( + const Architecture& arch, const std::vector& problems, + RNG& rng, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + TokenOption token_option = TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP); + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp new file mode 100644 index 0000000000..933ffb1d22 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -0,0 +1,115 @@ +#include "ProblemGeneration.hpp" + +#include + +#include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +TSProblemParameters00::TSProblemParameters00() + : token_density_percentage(10), + min_number_of_tokens(1), + max_number_of_tokens(10000) {} + +VertexMapping TSProblemParameters00::get_problem( + RNG& rng, unsigned number_of_vertices) const { + unsigned number_of_tokens = + (token_density_percentage * number_of_vertices) / 100; + number_of_tokens = std::max(number_of_tokens, min_number_of_tokens); + number_of_tokens = std::min(number_of_tokens, number_of_vertices); + number_of_tokens = std::min(number_of_tokens, max_number_of_tokens); + + VertexMapping vertex_mapping; + const auto tokens = get_random_set(rng, number_of_tokens, number_of_vertices); + const auto targets_set = + get_random_set(rng, number_of_tokens, number_of_vertices); + REQUIRE(tokens.size() == number_of_tokens); + REQUIRE(targets_set.size() == number_of_tokens); + vector targets{targets_set.cbegin(), targets_set.cend()}; + for (auto token : tokens) { + vertex_mapping[token] = rng.get_and_remove_element(targets); + } + REQUIRE(targets.empty()); + REQUIRE(vertex_mapping.size() == number_of_tokens); + return vertex_mapping; +} + +ProblemGenerator00::ProblemGenerator00() + : init_token_density_percentage(1), final_percentage(100), step(1) {} + +vector ProblemGenerator00::get_problems( + const std::string& arch_name, const Architecture& arch, RNG& rng, + // It will calculate a short summary string of the problems + // and check against this string; this helps to detect + // accidentally changed parameters/generation algorithms + // leading to different tests. 
+ const std::string& expected_summary) const { + REQUIRE(step > 0); + const unsigned num_vertices = arch.n_uids(); + TSProblemParameters00 params; + vector vertex_mappings; + + // This will probably detect if the rng changes, or has different seed + auto code = rng.get_size_t(255); + unsigned tokens_count = 0; + for (params.token_density_percentage = init_token_density_percentage; + params.token_density_percentage <= final_percentage; + params.token_density_percentage += step) { + vertex_mappings.push_back(params.get_problem(rng, num_vertices)); + tokens_count += vertex_mappings.back().size(); + } + code = (code << 8) + rng.get_size_t(255); + std::stringstream ss; + ss << "[" << arch_name << ": " << code << ": v" << num_vertices << " i" + << init_token_density_percentage << " f" << final_percentage << " s" + << step << ": " << vertex_mappings.size() << " problems; " << tokens_count + << " tokens]"; + CHECK(ss.str() == expected_summary); + return vertex_mappings; +} + +RandomTreeGenerator00::RandomTreeGenerator00() + : min_number_of_children(1), + max_number_of_children(3), + approx_number_of_vertices(10) {} + +// Creates the edges of a random tree with vertex 0 being the root. +vector> RandomTreeGenerator00::get_tree_edges( + RNG& rng) const { + REQUIRE(max_number_of_children > min_number_of_children); + REQUIRE(max_number_of_children > 1); + REQUIRE(approx_number_of_vertices >= 3); + // The vertices awaiting child nodes to be assigned. + work_vector.resize(1); + work_vector[0] = 0; + + vector> edges; + for (auto infinite_loop_guard = 100 + 100 * approx_number_of_vertices; + infinite_loop_guard > 0; --infinite_loop_guard) { + const auto number_of_children = + rng.get_size_t(min_number_of_children, max_number_of_children); + const unsigned node = rng.get_and_remove_element(work_vector); + for (unsigned ii = 0; ii < number_of_children; ++ii) { + const unsigned new_vertex = edges.size() + 1; + work_vector.push_back(new_vertex); + edges.emplace_back(node, new_vertex); + if (edges.size() + 1 >= approx_number_of_vertices) { + return edges; + } + } + if (work_vector.empty()) { + return edges; + } + } + REQUIRE(false); + return edges; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp new file mode 100644 index 0000000000..917e664e79 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -0,0 +1,66 @@ +#ifndef _TKET_TESTS_TokenSwapping_TestUtils_ProblemGeneration_H_ +#define _TKET_TESTS_TokenSwapping_TestUtils_ProblemGeneration_H_ + +#include "Architecture/Architectures.hpp" +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +struct TSProblemParameters00 { + // How many tokens are there, as a percentage of the number of vertices? + // Will still work if above 100, just gets truncated to 100%. + unsigned token_density_percentage; + + // For very small graphs, ensure a minimum number of tokens. + unsigned min_number_of_tokens; + unsigned max_number_of_tokens; + + TSProblemParameters00(); + + // Using the above problem parameters + VertexMapping get_problem(RNG& rng, unsigned number_of_vertices) const; +}; + +// Given an architecture, generate various test problems +// with varying numbers of tokens. 
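+// (With the default construction values of 1% initial density, 100% final
+// density and step 1, get_problems returns one problem per density value,
+// i.e. 100 problems per architecture; the "100 problems" counts in the tests'
+// expected-summary strings come from this.)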
+struct ProblemGenerator00 { + unsigned init_token_density_percentage; + unsigned final_percentage; + unsigned step; + + ProblemGenerator00(); + + std::vector get_problems( + const std::string& arch_name, const Architecture& arch, RNG& rng, + // It will calculate a short summary string of the problems + // and check against this string; this helps to detect + // accidentally changed parameters/generation algorithms + // leading to different tests. + const std::string& expected_summary) const; +}; + +struct RandomTreeGenerator00 { + // Every finite tree must have a leaf! + // So, some vertices will end up being leaves (having no children), + // even if the min is nonzero. + unsigned min_number_of_children; + unsigned max_number_of_children; + unsigned approx_number_of_vertices; + mutable std::vector work_vector; + + RandomTreeGenerator00(); + + // Creates the edges of a random tree with vertices {0,1,2,...} with + // vertex 0 being the root. + // It might not find exactly the requested number of vertices. + // Note that (number of vertices) == (number of edges+1), for a tree. + std::vector> get_tree_edges(RNG& rng) const; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp new file mode 100644 index 0000000000..de6094d953 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp @@ -0,0 +1,58 @@ +#include "TestStatsStructs.hpp" + +#include +#include +#include + +; + +namespace tket { +namespace tsa_internal { +namespace tests { + +void MinMaxAv::add(size_t result) { + min = std::min(min, result); + max = std::max(max, result); + total += result; +} + +void PartialTsaStatistics::add_problem_result( + size_t initial_L, size_t final_L, size_t tokens, size_t swaps) { + REQUIRE(final_L <= initial_L); + REQUIRE(final_L + 2 * swaps >= initial_L); + total_number_of_tokens += tokens; + if (initial_L == 0) { + CHECK(swaps == 0); + l_decrease_percentages.add(100); + powers.add(100); + return; + } + ++number_of_problems; + total_of_L += initial_L; + const size_t l_decrease = initial_L - final_L; + total_of_L_decreases += l_decrease; + + l_decrease_percentages.add((100 * (initial_L - final_L)) / initial_L); + total_number_of_swaps += swaps; + if (swaps == 0) { + powers.add(0); + } else { + powers.add((50 * l_decrease) / swaps); + } +} + +std::string PartialTsaStatistics::str(size_t number_of_problems) const { + REQUIRE(number_of_problems != 0); + std::stringstream ss; + ss << total_number_of_tokens << " tokens; " << total_of_L << " total L; " + << total_number_of_swaps << " swaps.\nL-decr %: min " + << l_decrease_percentages.min << ", max " << l_decrease_percentages.max + << ", av " << l_decrease_percentages.total / number_of_problems + << ".\nPower %: min " << powers.min << ", max " << powers.max << ", av " + << powers.total / number_of_problems; + return ss.str(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp new file mode 100644 index 0000000000..8cd1f3afe3 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp @@ -0,0 +1,44 @@ +#ifndef _TKET_TESTS_TokenSwapping_TestUtils_TestStatsStructs_H_ +#define _TKET_TESTS_TokenSwapping_TestUtils_TestStatsStructs_H_ + +#include +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests 
{ + +struct MinMaxAv { + size_t min = std::numeric_limits::max(); + size_t max = 0; + size_t total = 0; + + void add(size_t result); +}; + +struct PartialTsaStatistics { + size_t number_of_problems = 0; + size_t total_of_L = 0; + size_t total_of_L_decreases = 0; + size_t total_number_of_tokens = 0; + size_t total_number_of_swaps = 0; + + MinMaxAv l_decrease_percentages; + + // The "power" of a swap sequence (with given token configuration) + // is defined to be (decrease in L)/(number of swaps). + // Thus, it's always between 0 and 2 (if all swaps make progress). + // However, we multiply by 50, to make the power between 0 and 100%. + MinMaxAv powers; + + void add_problem_result( + size_t initial_L, size_t final_L, size_t tokens, size_t swaps); + + std::string str(size_t number_of_problems) const; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket +#endif diff --git a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp new file mode 100644 index 0000000000..30d6cd6a69 --- /dev/null +++ b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -0,0 +1,95 @@ +#include +#include + +#include "TokenSwapping/ArchitectureMapping.hpp" +#include "TokenSwapping/DistancesFromArchitecture.hpp" +#include "TokenSwapping/NeighboursFromArchitecture.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Simple path") { + const vector> edges{ + {111, 222}, {555, 444}, {333, 222}, {777, 666}, {333, 444}, {666, 555}}; + const unsigned n_verts = edges.size() + 1; + std::stringstream ss; + ss << "Original input edges:\n"; + for (auto edge : edges) { + ss << "(" << edge.first << "," << edge.second << ") "; + } + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch); + + ss << "...\nEdges from arch.mapping:\n"; + for (auto edge : arch_mapping.get_edges()) { + ss << "(" << edge.first << "," << edge.second << ") "; + } + ss << "...\nVertex-to-node:"; + + for (unsigned vv = 0; vv < n_verts; ++vv) { + const auto node = arch_mapping.get_node(vv); + REQUIRE(vv == arch_mapping.get_vertex(node)); + ss << "\n" << vv << " == " << node.repr(); + } + ss << "...\nDistances:"; + + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + + for (unsigned ii = 0; ii < n_verts; ++ii) { + ss << "\n" << ii << ": ["; + for (unsigned jj = ii + 1; jj < n_verts; ++jj) { + REQUIRE(0 == distances(ii, ii)); + const auto dist = distances(ii, jj); + ss << " " << dist; + REQUIRE(dist == distances(jj, ii)); + } + ss << "]"; + } + ss << "\nNeighbours:"; + for (unsigned ii = 0; ii < n_verts; ++ii) { + ss << "\n" << ii << ": ["; + const auto& neighb = neighbours(ii); + for (auto nn : neighb) { + ss << " " << nn; + } + ss << " ]"; + } + CHECK( + ss.str() == + "Original input edges:\n" + "(111,222) (555,444) (333,222) (777,666) (333,444) (666,555) ...\n" + "Edges from arch.mapping:\n" + "(0,1) (2,3) (1,4) (5,6) (3,4) (2,6) ...\n" + "Vertex-to-node:\n" + "0 == node[111]\n" + "1 == node[222]\n" + "2 == node[555]\n" + "3 == node[444]\n" + "4 == node[333]\n" + "5 == node[777]\n" + "6 == node[666]...\n" + "Distances:\n" + "0: [ 1 4 3 2 6 5]\n" + "1: [ 3 2 1 5 4]\n" + "2: [ 1 2 2 1]\n" + "3: [ 1 3 2]\n" + "4: [ 4 3]\n" + "5: [ 1]\n" + "6: []\n" + "Neighbours:\n" + "0: [ 1 ]\n" + "1: [ 0 4 ]\n" + "2: [ 3 6 ]\n" + "3: [ 2 4 ]\n" + "4: [ 1 3 ]\n" + "5: [ 6 ]\n" + "6: [ 2 5 ]"); +} + +} // namespace tests +} // namespace 
tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp new file mode 100644 index 0000000000..1ae0ce481d --- /dev/null +++ b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp @@ -0,0 +1,306 @@ +#include + +#include "Data/FixedCompleteSolutions.hpp" +#include "Data/FixedSwapSequences.hpp" +#include "TestUtils/BestTsaTester.hpp" + +// NOTE: currently, the tests in this file (solving ~2300 complete problems +// with the BestTSA, which includes full table lookup) +// take ~25 seconds on an ordinary Windows laptop. +// +/// TODO: The swap table optimiser currently tries to optimise many segments; +/// certainly it could be cut down, experimentation is needed +/// to find how much to cut it down, without degrading solution +/// quality too much. +// + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct FixedSeqsStats { + size_t equivalent_solns = 0; + size_t equivalent_solns_swaps = 0; + size_t better_solns = 0; + size_t better_solns_swaps = 0; + size_t better_solns_known_swaps = 0; + size_t better_solns_total_swap_diff = 0; + size_t better_solns_percent_decr_total = 0; + size_t worse_solns = 0; + size_t worse_solns_swaps = 0; + size_t worse_solns_known_swaps = 0; + size_t worse_solns_total_swap_diff = 0; + size_t worse_solns_percent_incr_total = 0; + + void add(size_t known_size, size_t calc_size) { + if (known_size == calc_size) { + ++equivalent_solns; + equivalent_solns_swaps += known_size; + return; + } + if (calc_size < known_size) { + ++better_solns; + better_solns_swaps += calc_size; + better_solns_known_swaps += known_size; + const auto decr = known_size - calc_size; + better_solns_total_swap_diff += decr; + better_solns_percent_decr_total += (decr * 100) / known_size; + return; + } + ++worse_solns; + worse_solns_swaps += calc_size; + worse_solns_known_swaps += known_size; + const auto incr = calc_size - known_size; + worse_solns_total_swap_diff += incr; + worse_solns_percent_incr_total += (incr * 100) / known_size; + } + + std::string str() const { + std::stringstream ss; + size_t good_soln_av_decr = 0; + if (better_solns > 0) { + good_soln_av_decr = better_solns_percent_decr_total / better_solns; + } + size_t bad_soln_av_incr = 0; + if (worse_solns > 0) { + bad_soln_av_incr = worse_solns_percent_incr_total / worse_solns; + } + + ss << "[" << equivalent_solns << " equal (" << equivalent_solns_swaps + << "); " << better_solns << " BETTER (" << better_solns_swaps << " vs " + << better_solns_known_swaps << "): av " << good_soln_av_decr + << "% decr\n" + << worse_solns << " WORSE (" << worse_solns_swaps << " vs " + << worse_solns_known_swaps << "): av " << bad_soln_av_incr << "% incr]"; + return ss.str(); + } +}; +} // namespace + +static void check_overall_percentage_improvement( + unsigned total_number_of_problems, unsigned total_calc_swaps, + unsigned total_orig_swaps, double expected_percentage) { + const double actual_decrease = + 100.0 - (100.0 * total_calc_swaps) / (double)total_orig_swaps; + if (std::abs(actual_decrease - expected_percentage) < 1e-4) { + return; + } + INFO( + "Solved " << total_number_of_problems + << " problems; known solutions have total swaps " + << total_orig_swaps << ". We calculated " << total_calc_swaps + << ", giving percentage decrease " << actual_decrease + << ". 
But we expected " << expected_percentage); + CHECK(false); +} + +namespace { +struct Summary { + std::string str; + unsigned total_calc_swaps; + unsigned total_orig_swaps; + unsigned total_number_of_problems; + + Summary( + const vector& encoded_swap_sequences, BestTsaTester& tester) + : total_calc_swaps(0), total_orig_swaps(0), total_number_of_problems(0) { + FixedSeqsStats stats; + for (const auto& code_str : encoded_swap_sequences) { + const DecodedProblemData data(code_str); + const auto known_size = data.swaps.size(); + REQUIRE(known_size > 0); + try { + const auto calc_soln_size = tester.get_checked_solution_size(data); + stats.add(known_size, calc_soln_size); + total_calc_swaps += calc_soln_size; + total_orig_swaps += known_size; + ++total_number_of_problems; + } catch (const std::exception& e) { + INFO( + "Swap seq encoding string '" + << code_str << "'\n...encoded " << data.swaps.size() << " swaps, " + << data.vertex_mapping.size() << " tokens on " + << data.number_of_vertices + << " vertices. Gave error: " << e.what()); + REQUIRE(false); + } + } + str = stats.str(); + } + + void check_overall_improvement(double expected_percentage) const { + check_overall_percentage_improvement( + total_number_of_problems, total_calc_swaps, total_orig_swaps, + expected_percentage); + } +}; +} // namespace + +SCENARIO("Best TSA : solve problems from fixed swap sequences") { + const FixedSwapSequences sequences; + BestTsaTester tester; + + const Summary full_seqs_summary(sequences.full, tester); + CHECK(full_seqs_summary.total_number_of_problems == 453); + CHECK( + full_seqs_summary.str == + "[248 equal (6088); 104 BETTER (4645 vs 4979): av 7% decr\n" + "101 WORSE (5893 vs 5451): av 8% incr]"); + + // The fixed swap sequences have been optimised quite a lot already, + // so are probably quite close to optimal. + full_seqs_summary.check_overall_improvement(-0.653832); + + const Summary partial_seqs_summary(sequences.partial, tester); + CHECK(partial_seqs_summary.total_number_of_problems == 755); + CHECK( + partial_seqs_summary.str == + "[455 equal (6487); 165 BETTER (7044 vs 7457): av 7% decr\n" + "135 WORSE (9124 vs 8604): av 6% incr]"); + + partial_seqs_summary.check_overall_improvement(-0.474543); +} + +// Now we want to solve complete problems; this is one of +// our most important tests. It is a bit silly +// to put problems with 5 vertices and problems with +// 50 vertices in the same test. Therefore, we crudely sort by length of +// encoding string, which is roughly "problem size", +// and distribute the final statistics amongst a number of categories +// based upon problem size. 
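+// (The size boundaries are placed at evenly spaced positions in the sorted list
+// of encoding-string lengths, so each category receives roughly the same number
+// of problems.)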
+namespace { +class StatisticsGrouper { + public: + StatisticsGrouper( + unsigned number_of_messages, + const vector& sorted_problem_sizes) { + REQUIRE(number_of_messages >= 3); + REQUIRE(sorted_problem_sizes.size() >= 5 * number_of_messages); + REQUIRE(sorted_problem_sizes[0] >= 5); + m_stats.resize(number_of_messages); + m_problem_size_boundaries.resize(number_of_messages); + const unsigned step = sorted_problem_sizes.size() / number_of_messages; + for (unsigned ii = 0; ii + 1 < number_of_messages; ++ii) { + m_problem_size_boundaries[ii] = sorted_problem_sizes[(ii + 1) * step]; + } + m_problem_size_boundaries.back() = sorted_problem_sizes.back() + 1; + } + + void add( + const std::string& problem_str, + const DecodedArchitectureData& arch_data) { + unsigned allowed_index = 0; + for (unsigned index = 0; index < m_problem_size_boundaries.size(); + ++index) { + if (problem_str.size() <= m_problem_size_boundaries[index]) { + allowed_index = index; + break; + } + } + // Now we know which category it's in, so do the calculation + auto& stats = m_stats[allowed_index]; + const DecodedProblemData data( + problem_str, DecodedProblemData::RequireContiguousVertices::NO); + const auto known_size = data.swaps.size(); + REQUIRE(known_size > 0); + try { + const auto calc_soln_size = + m_tester.get_checked_solution_size(data, arch_data); + stats.add(known_size, calc_soln_size); + m_total_calc_swaps += calc_soln_size; + m_total_orig_swaps += known_size; + ++m_total_number_of_problems; + } catch (const std::exception& e) { + INFO( + "Swap seq encoding string '" << problem_str << "'\n...encoded " + << data.swaps.size() + << " swaps, error: " << e.what()); + CHECK(false); + } + } + + vector get_final_messages() const { + vector messages(m_stats.size()); + for (unsigned ii = 0; ii < m_stats.size(); ++ii) { + messages[ii] = m_stats[ii].str(); + } + return messages; + } + + void check_overall_improvement(double expected_percentage) const { + check_overall_percentage_improvement( + m_total_number_of_problems, m_total_calc_swaps, m_total_orig_swaps, + expected_percentage); + } + + private: + unsigned m_total_calc_swaps = 0; + unsigned m_total_orig_swaps = 0; + unsigned m_total_number_of_problems = 0; + BestTsaTester m_tester; + vector m_stats; + vector m_problem_size_boundaries; +}; +} // namespace + +SCENARIO("Best TSA : solve complete problems") { + const FixedCompleteSolutions complete_solutions; + + // For a good test, very different problems should not be amalgamated + // in the statistics. Thus we determine the different categories using length + // of encoding string, which presumably roughly corresponds to "problem size" + // and problem hardness. + const vector expected_messages{ + "[210 equal (1018); 19 BETTER (84 vs 111): av 24% decr\n" + "2 WORSE (19 vs 15): av 26% incr]", + + "[145 equal (1822); 39 BETTER (451 vs 525): av 13% decr\n" + "17 WORSE (269 vs 242): av 11% incr]", + + "[58 equal (1619); 122 BETTER (3465 vs 3832): av 9% decr\n" + "34 WORSE (1321 vs 1232): av 6% incr]", + + "[18 equal (1382); 114 BETTER (8322 vs 8856): av 5% decr\n" + "83 WORSE (6875 vs 6457): av 5% incr]", + + "[8 equal (1470); 164 BETTER (25183 vs 27141): av 6% decr\n" + "44 WORSE (8722 vs 8384): av 3% incr]"}; + + vector problem_sizes; + for (const auto& entry : complete_solutions.solutions) { + REQUIRE(entry.second.size() >= 2); + // The first string encodes the edges in that architecture, + // rather than a problem. 
+ for (unsigned ii = 1; ii < entry.second.size(); ++ii) { + problem_sizes.push_back(entry.second[ii].size()); + } + } + std::sort(problem_sizes.begin(), problem_sizes.end()); + StatisticsGrouper grouper(expected_messages.size(), problem_sizes); + + // Now go through the problems, let the grouper object collate the stats + // appropriately + for (const auto& entry : complete_solutions.solutions) { + const DecodedArchitectureData arch_data(entry.second[0]); + for (unsigned ii = 1; ii < entry.second.size(); ++ii) { + grouper.add(entry.second[ii], arch_data); + } + } + const auto calc_messages = grouper.get_final_messages(); + REQUIRE(calc_messages.size() == expected_messages.size()); + for (unsigned ii = 0; ii < calc_messages.size(); ++ii) { + INFO("for message[" << ii << "]: "); + CHECK(calc_messages[ii] == expected_messages[ii]); + } + // A positive result is good; the fixed complete problems are DIRECTLY + // comparing our TSA with the solver used to generate them. + grouper.check_overall_improvement(3.25087); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp new file mode 100644 index 0000000000..061b1c625a --- /dev/null +++ b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp @@ -0,0 +1,67 @@ +#include +#include +#include + +#include "TokenSwapping/DistancesFromArchitecture.hpp" + +using Catch::Matchers::Contains; +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Architecture with disconnected graph") { + // Check that distance(v1, v2) does indeed give an error if v1, v2 are in + // different connected components. + const std::vector> edges{ + {0, 1}, {0, 2}, {1, 3}, {4, 5}}; + const size_t number_of_vertices = 6; + const Architecture arch(edges); + // Note: it's a "coincidence" that the vertex numbers are unchanged, + // because 0,1,2,3,4,5 are first seen in this order. + const ArchitectureMapping mapping(arch); + REQUIRE(mapping.number_of_vertices() == number_of_vertices); + DistancesFromArchitecture dist_calculator(mapping); + std::stringstream summary; + for (size_t v1 = 0; v1 < number_of_vertices; ++v1) { + for (size_t v2 = 0; v2 < number_of_vertices; ++v2) { + summary << "d(" << v1 << "," << v2 << ")="; + try { + const auto distance = dist_calculator(v1, v2); + summary << distance << ";"; + if (distance == 0) { + CHECK(v1 == v2); + } else { + CHECK(v1 != v2); + } + } catch (const std::exception& e) { + // 4 or 5 is involved, but not (4,5). + const bool four_or_five_occurs = + (v1 == 4 || v2 == 4 || v1 == 5 || v2 == 5); + CHECK(four_or_five_occurs); + // ...but not (4,5). 
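+ // Among vertices 0..5, only the pair {4,5} sums to 9, and (4,5) IS an
+ // edge, so a throw for that pair would be an error.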
+ CHECK(v1 + v2 != 9); + summary << "INF;"; + const std::string message = e.what(); + CHECK_THAT(message, Contains("are not connected")); + } + } + } + CHECK( + summary.str() == + "d(0,0)=0;d(0,1)=1;d(0,2)=1;d(0,3)=2;d(0,4)=INF;d(0,5)=INF;d(1,0)=1;" + "d(1,1)=0;" + "d(1,2)=2;d(1,3)=1;d(1,4)=INF;d(1,5)=INF;d(2,0)=1;d(2,1)=2;d(2,2)=0;" + "d(2,3)=3;d" + "(2,4)=INF;d(2,5)=INF;d(3,0)=2;d(3,1)=1;d(3,2)=3;d(3,3)=0;d(3,4)=INF;" + "d(3,5)=" + "INF;d(4,0)=INF;d(4,1)=INF;d(4,2)=INF;d(4,3)=INF;d(4,4)=0;d(4,5)=1;d(" + "5,0)=INF;" + "d(5,1)=INF;d(5,2)=INF;d(5,3)=INF;d(5,4)=1;d(5,5)=0;"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_FullTsa.cpp b/tket/tests/TokenSwapping/test_FullTsa.cpp new file mode 100644 index 0000000000..80b0a19b48 --- /dev/null +++ b/tket/tests/TokenSwapping/test_FullTsa.cpp @@ -0,0 +1,242 @@ +#include + +#include "TestUtils/FullTsaTesting.hpp" +#include "TestUtils/ProblemGeneration.hpp" +#include "TokenSwapping/HybridTsa00.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct FullTester { + FullTsaTesting results; + FullTsaTesting trivial_results; + HybridTsa00 full_tsa; + TrivialTSA trivial_tsa; + RNG rng; + ProblemGenerator00 generator; + std::string test_name; + + void add_problems( + const Architecture& arch, const std::string& arch_name, + const std::string& problem_message) { + rng.set_seed(); + const auto problems = + generator.get_problems(arch_name, arch, rng, problem_message); + + // OK to reuse RNG, as it's reset before each problem. + results.add_problems(arch, problems, test_name, rng, full_tsa); + + trivial_tsa.set(TrivialTSA::Options::FULL_TSA); + trivial_results.add_problems(arch, problems, test_name, rng, trivial_tsa); + } +}; +} // namespace + +SCENARIO("Full TSA: stars") { + const vector problem_messages{ + "[Star3: 51481: v4 i1 f100 s1: 100 problems; 178 tokens]", + "[Star5: 51528: v6 i1 f100 s1: 100 problems; 270 tokens]", + "[Star10: 51662: v11 i1 f100 s1: 100 problems; 515 tokens]", + "[Star20: 51494: v21 i1 f100 s1: 100 problems; 1015 tokens]"}; + const vector num_spokes{3, 5, 10, 20}; + FullTester tester; + tester.test_name = "Stars"; + std::string arch_name; + vector> edges; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + arch_name = "Star" + std::to_string(num_spokes[index]); + edges.clear(); + for (unsigned vv = 1; vv <= num_spokes[index]; ++vv) { + edges.emplace_back(0, vv); + } + const Architecture arch(edges); + tester.add_problems(arch, arch_name, problem_messages[index]); + } + CHECK( + tester.results.str() == + "[Stars:HybridTSA_00: 400 probs; 1978 toks; 1623 tot.lb]\n" + "[Total swaps: 2632 2588 2550 2539 2539 2550]\n" + "[Winners: joint: 360 381 392 400 400 392 undisputed: 0 0 0 0 0 0]"); + + CHECK( + tester.trivial_results.str() == + "[Stars:Trivial: 400 probs; 1978 toks; 1623 tot.lb]\n" + "[Total swaps: 3968 3804 3088 3088 3088 3088]\n" + "[Winners: joint: 247 271 400 400 400 400 undisputed: 0 0 0 0 0 0]"); +} + +SCENARIO("Full TSA: wheels") { + const vector problem_messages{ + "[Wheel3: 51481: v4 i1 f100 s1: 100 problems; 178 tokens]", + "[Wheel5: 51528: v6 i1 f100 s1: 100 problems; 270 tokens]", + "[Wheel10: 51662: v11 i1 f100 s1: 100 problems; 515 tokens]", + "[Wheel20: 51494: v21 i1 f100 s1: 100 problems; 1015 tokens]"}; + + const vector num_spokes{3, 5, 10, 20}; + FullTester tester; + tester.test_name = "Wheels"; + std::string arch_name; + vector> edges; + + for (size_t index = 0; 
index < problem_messages.size(); ++index) { + arch_name = "Wheel" + std::to_string(num_spokes[index]); + edges.clear(); + for (unsigned vv = 1; vv <= num_spokes[index]; ++vv) { + edges.emplace_back(0, vv); + if (vv == num_spokes[index]) { + edges.emplace_back(vv, 1); + } else { + edges.emplace_back(vv, vv + 1); + } + } + const Architecture arch(edges); + tester.add_problems(arch, arch_name, problem_messages[index]); + } + CHECK( + tester.results.str() == + "[Wheels:HybridTSA_00: 400 probs; 1978 toks; 1533 tot.lb]\n" + "[Total swaps: 2482 2462 2430 2422 2422 2430]\n" + "[Winners: joint: 374 384 395 400 400 395 undisputed: 0 0 0 0 0 0]"); + + CHECK( + tester.trivial_results.str() == + "[Wheels:Trivial: 400 probs; 1978 toks; 1533 tot.lb]\n" + "[Total swaps: 3510 3410 2818 2818 2818 2818]\n" + "[Winners: joint: 283 291 400 400 400 400 undisputed: 0 0 0 0 0 0]"); +} + +SCENARIO("Full TSA: Rings") { + const vector problem_messages{ + "[Ring3: 51582: v3 i1 f100 s1: 100 problems; 135 tokens]", + "[Ring5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[Ring10: 51634: v10 i1 f100 s1: 100 problems; 469 tokens]", + "[Ring20: 51498: v20 i1 f100 s1: 100 problems; 974 tokens]"}; + const vector num_vertices{3, 5, 10, 20}; + FullTester tester; + tester.test_name = "Rings"; + std::string arch_name; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + const RingArch arch(num_vertices[index]); + arch_name = "Ring" + std::to_string(num_vertices[index]); + tester.add_problems(arch, arch_name, problem_messages[index]); + } + CHECK( + tester.results.str() == + "[Rings:HybridTSA_00: 400 probs; 1802 toks; 3193 tot.lb]\n" + "[Total swaps: 6302 5942 5118 5115 5113 5118]\n" + "[Winners: joint: 292 328 399 399 400 399 undisputed: 0 0 0 0 1 0]"); + + CHECK( + tester.trivial_results.str() == + "[Rings:Trivial: 400 probs; 1802 toks; 3193 tot.lb]\n" + "[Total swaps: 8922 8580 5104 5087 5079 5104]\n" + "[Winners: joint: 231 252 394 397 400 394 undisputed: 0 0 0 0 3 0]"); +} + +SCENARIO("Full TSA: fully connected") { + const vector problem_messages{ + "[K3: 51582: v3 i1 f100 s1: 100 problems; 135 tokens]", + "[K5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[K10: 51634: v10 i1 f100 s1: 100 problems; 469 tokens]", + "[K20: 51498: v20 i1 f100 s1: 100 problems; 974 tokens]"}; + + const vector num_vertices{3, 5, 10, 20}; + FullTester tester; + tester.test_name = "FullyConn"; + std::string arch_name; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + const FullyConnected arch(num_vertices[index]); + arch_name = "K" + std::to_string(num_vertices[index]); + tester.add_problems(arch, arch_name, problem_messages[index]); + } + CHECK( + tester.results.str() == + "[FullyConn:HybridTSA_00: 400 probs; 1802 toks; 867 tot.lb]\n" + "[Total swaps: 1435 1435 1435 1435 1435 1435]\n" + "[Winners: joint: 400 400 400 400 400 400 undisputed: 0 0 0 0 0 0]"); + + CHECK( + tester.trivial_results.str() == + "[FullyConn:Trivial: 400 probs; 1802 toks; 867 tot.lb]\n" + "[Total swaps: 1435 1435 1435 1435 1435 1435]\n" + "[Winners: joint: 400 400 400 400 400 400 undisputed: 0 0 0 0 0 0]"); +} + +SCENARIO("Full TSA: Square Grids") { + const vector> grid_parameters = { + {2, 2, 2}, {3, 4, 4}}; + const vector problem_messages{ + "[Grid(2,2,2): 51480: v8 i1 f100 s1: 100 problems; 368 tokens]", + "[Grid(3,4,4): 51492: v48 i1 f100 s1: 100 problems; 2378 tokens]"}; + + FullTester tester; + tester.test_name = "Square grids"; + + for (size_t index = 0; index < grid_parameters.size(); ++index) { + const auto& 
parameters = grid_parameters[index]; + const SquareGrid arch(parameters[0], parameters[1], parameters[2]); + std::stringstream ss; + ss << "Grid(" << parameters[0] << "," << parameters[1] << "," + << parameters[2] << ")"; + + tester.add_problems(arch, ss.str(), problem_messages[index]); + } + CHECK( + tester.results.str() == + "[Square grids:HybridTSA_00: 200 probs; 2746 toks; 4323 tot.lb]\n" + "[Total swaps: 7083 7015 6863 6846 6842 6863]\n" + "[Winners: joint: 148 163 188 198 200 188 undisputed: 0 0 0 0 2 0]"); + + CHECK( + tester.trivial_results.str() == + "[Square grids:Trivial: 200 probs; 2746 toks; 4323 tot.lb]\n" + "[Total swaps: 12364 12208 9114 9039 8933 9114]\n" + "[Winners: joint: 85 91 152 177 200 152 undisputed: 0 0 0 0 23 0]"); +} + +SCENARIO("Full TSA: Random trees") { + RandomTreeGenerator00 tree_generator; + FullTester tester; + + const vector problem_messages{ + "[Tree0: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[Tree1: 51517: v16 i1 f100 s1: 100 problems; 766 tokens]", + "[Tree2: 51481: v24 i1 f100 s1: 100 problems; 1168 tokens]"}; + tester.test_name = "Trees"; + std::string arch_name; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + tree_generator.min_number_of_children = index; + tree_generator.max_number_of_children = 2 + 2 * index; + tree_generator.approx_number_of_vertices = + 4 * tree_generator.max_number_of_children; + + const auto edges = tree_generator.get_tree_edges(tester.rng); + const Architecture arch(edges); + REQUIRE(arch.n_uids() == edges.size() + 1); + arch_name = "Tree" + std::to_string(index); + tester.add_problems(arch, arch_name, problem_messages[index]); + } + CHECK( + tester.results.str() == + "[Trees:HybridTSA_00: 300 probs; 2158 toks; 2963 tot.lb]\n" + "[Total swaps: 5216 5132 4844 4828 4817 4844]\n" + "[Winners: joint: 227 251 286 296 300 286 undisputed: 0 0 0 0 4 0]"); + + CHECK( + tester.trivial_results.str() == + "[Trees:Trivial: 300 probs; 2158 toks; 2963 tot.lb]\n" + "[Total swaps: 8128 7886 5592 5570 5563 5600]\n" + "[Winners: joint: 128 148 282 297 300 280 undisputed: 0 0 0 0 3 0]"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp new file mode 100644 index 0000000000..412d4d776e --- /dev/null +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -0,0 +1,254 @@ +#include +#include + +#include "TokenSwapping/ArchitectureMapping.hpp" +#include "TokenSwapping/DistancesFromArchitecture.hpp" +#include "TokenSwapping/NeighboursFromArchitecture.hpp" +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +// It is a cycle (ring) on vertices [0,1,2,..., N-1], with N ~ 0. 
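+// The distance between v1 and v2 is min(|v1 - v2|, N - |v1 - v2|),
+// i.e. the length of the shorter way round the ring.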
+struct DistancesForCycle : public DistancesInterface { + size_t number_of_vertices = 10; + + virtual size_t operator()(size_t v1, size_t v2) override { + size_t distance1; + if (v1 < v2) { + distance1 = v2 - v1; + } else { + distance1 = v1 - v2; + } + const size_t distance2 = number_of_vertices - distance1; + return std::min(distance1, distance2); + } +}; + +class NeighboursForCycle : public NeighboursInterface { + public: + explicit NeighboursForCycle(size_t number_of_vertices) + : m_number_of_vertices(number_of_vertices) { + REQUIRE(number_of_vertices > 1); + if (m_number_of_vertices == 2) { + m_neighbours.resize(1); + } else { + m_neighbours.resize(2); + } + } + + virtual const vector& operator()(size_t vertex) override { + if (vertex >= m_number_of_vertices) { + throw std::runtime_error("neighbours requested for invalid vertex"); + } + m_neighbours[0] = (vertex + 1) % m_number_of_vertices; + if (m_neighbours.size() > 1) { + m_neighbours[1] = + ((vertex + m_number_of_vertices) - 1) % m_number_of_vertices; + } + return m_neighbours; + } + + private: + size_t m_number_of_vertices; + vector m_neighbours; +}; + +struct TestResult { + size_t total_number_of_path_calls = 0; + size_t total_number_of_differing_extra_paths = 0; + + std::string str() const { + std::stringstream ss; + ss << "[ Number of path calls: " << total_number_of_path_calls + << " Extra paths: " << total_number_of_differing_extra_paths << " ]"; + return ss.str(); + } +}; + +} // namespace + +static void do_simple_path_test( + const vector& path, const Swap& endpoints) { + REQUIRE(!path.empty()); + REQUIRE(path[0] == endpoints.first); + REQUIRE(path.back() == endpoints.second); + + const std::set vertices{path.cbegin(), path.cend()}; + REQUIRE(vertices.size() == path.size()); +} + +static void require_path_to_have_valid_edges( + const vector& path, NeighboursInterface& neighbours_interface) { + std::array vertices; + for (size_t ii = 0; ii + 1 < path.size(); ++ii) { + vertices[0].first = path[ii]; + vertices[0].second = path[ii + 1]; + vertices[1].first = path[ii + 1]; + vertices[1].second = path[ii]; + for (const auto& pair : vertices) { + const auto& neighbours = neighbours_interface(pair.first); + bool is_neighbour = false; + for (auto neigh : neighbours) { + if (neigh == pair.second) { + is_neighbour = true; + break; + } + } + REQUIRE(is_neighbour); + } + } +} + +static void test( + TestResult& result, RiverFlowPathFinder& path_finder, + DistancesInterface& distance_calculator, + NeighboursInterface& neighbours_calculator, size_t number_of_vertices, + RNG& rng_for_test_data, size_t number_of_test_repeats = 10) { + // We will check that calculated paths are mostly unchanged. 
+ std::map>> calculated_paths; + + vector possible_path_calls; + possible_path_calls.reserve(number_of_vertices * number_of_vertices); + for (size_t ii = 0; ii < number_of_vertices; ++ii) { + for (size_t jj = 0; jj < number_of_vertices; ++jj) { + possible_path_calls.emplace_back(ii, jj); + calculated_paths[std::make_pair(ii, jj)]; + } + } + + // The first time a path is calculated, its length will be checked using + // the distance_calculator + const auto get_path_size = [&calculated_paths, &distance_calculator]( + const Swap& end_vertices) -> size_t { + if (end_vertices.first == end_vertices.second) { + return 1; + } + const auto& existing_paths = calculated_paths[end_vertices]; + if (!existing_paths.empty()) { + return existing_paths[0].size(); + } + const auto& reversed_existing_paths = calculated_paths[std::make_pair( + end_vertices.second, end_vertices.first)]; + + if (!reversed_existing_paths.empty()) { + return reversed_existing_paths[0].size(); + } + return 1 + distance_calculator(end_vertices.first, end_vertices.second); + }; + + for (size_t counter = number_of_test_repeats; counter > 0; --counter) { + rng_for_test_data.do_shuffle(possible_path_calls); + result.total_number_of_path_calls += possible_path_calls.size(); + + for (const Swap& end_vertices : possible_path_calls) { + const auto& calc_path = + path_finder(end_vertices.first, end_vertices.second); + + do_simple_path_test(calc_path, end_vertices); + REQUIRE(calc_path.size() == get_path_size(end_vertices)); + + auto& path_list = calculated_paths[end_vertices]; + bool found_path = false; + for (auto& path : path_list) { + if (path == calc_path) { + found_path = true; + break; + } + } + if (!found_path) { + if (!path_list.empty()) { + ++result.total_number_of_differing_extra_paths; + } + path_list.emplace_back(calc_path); + require_path_to_have_valid_edges(calc_path, neighbours_calculator); + } + } + } +} + +SCENARIO("Test path generation for cycles") { + RNG rng_for_path_generation; + RNG rng_for_test_data; + DistancesForCycle distances; + TestResult result; + + for (size_t number_of_vertices = 2; number_of_vertices <= 10; + ++number_of_vertices) { + INFO("number_of_vertices = " << number_of_vertices); + distances.number_of_vertices = number_of_vertices; + NeighboursForCycle neighbours(number_of_vertices); + RiverFlowPathFinder path_finder( + distances, neighbours, rng_for_path_generation); + + const auto current_differing_paths = + result.total_number_of_differing_extra_paths; + test( + result, path_finder, distances, neighbours, number_of_vertices, + rng_for_test_data); + + // Even cycles have non-unique paths, for polar opposite vertices; + // odd cycles do not. + if (number_of_vertices % 2 == 1) { + // No extra paths were created. + CHECK( + current_differing_paths == + result.total_number_of_differing_extra_paths); + } + } + REQUIRE(result.str() == "[ Number of path calls: 3840 Extra paths: 3 ]"); +} + +// Deliberately use the same RNG, so it's all mixed up; +// but we still expect not so many different paths. 
+static void test(TestResult& result, const Architecture& arch, RNG& rng) { + const ArchitectureMapping arch_mapping(arch); + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RiverFlowPathFinder path_finder(distances, neighbours, rng); + + test( + result, path_finder, distances, neighbours, + arch_mapping.number_of_vertices(), rng); +} + +SCENARIO("Path generation for complete graph") { + RNG rng; + TestResult result; + const FullyConnected arch(5); + test(result, arch, rng); + REQUIRE(result.str() == "[ Number of path calls: 250 Extra paths: 0 ]"); +} + +SCENARIO("Path generation for ring graph") { + RNG rng; + TestResult result; + const RingArch arch(7); + test(result, arch, rng); + REQUIRE(result.str() == "[ Number of path calls: 490 Extra paths: 0 ]"); +} + +SCENARIO("Path generation for square grids") { + RNG rng; + TestResult result; + for (size_t ver = 2; ver <= 4; ver += 2) { + for (size_t hor = 1; hor <= 5; hor += 2) { + for (size_t layer = 1; layer <= 3; layer += 2) { + const SquareGrid arch(ver, hor, layer); + INFO("Square grid: " << ver << ", " << hor << ", " << layer); + test(result, arch, rng); + } + } + } + REQUIRE(result.str() == "[ Number of path calls: 70000 Extra paths: 583 ]"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_SwapList.cpp b/tket/tests/TokenSwapping/test_SwapList.cpp new file mode 100644 index 0000000000..ba660b724c --- /dev/null +++ b/tket/tests/TokenSwapping/test_SwapList.cpp @@ -0,0 +1,41 @@ +#include +#include +#include + +#include "TokenSwapping/TSAUtils/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +std::string get_swaps_str(const SwapList& swap_list) { + std::stringstream ss; + const auto svect = swap_list.to_vector(); + ss << "[" << svect.size() << " swaps:"; + for (auto swap : svect) { + ss << " (" << swap.first << " " << swap.second << ") "; + } + ss << "]"; + return ss.str(); +} + +SCENARIO("simple swap list") { + SwapList swap_list; + CHECK(get_swaps_str(swap_list) == "[0 swaps:]"); + swap_list.clear(); + CHECK(get_swaps_str(swap_list) == "[0 swaps:]"); + swap_list.push_front(get_swap(0, 1)); + CHECK(get_swaps_str(swap_list) == "[1 swaps: (0 1) ]"); + const auto current_front = swap_list.front_id().value(); + const auto new_front = swap_list.emplace_front(); + CHECK(current_front != new_front); + CHECK(new_front == swap_list.front_id().value()); + swap_list.front() = get_swap(998, 999); + CHECK(get_swaps_str(swap_list) == "[2 swaps: (998 999) (0 1) ]"); + swap_list.pop_front(); + CHECK(get_swaps_str(swap_list) == "[1 swaps: (0 1) ]"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp new file mode 100644 index 0000000000..12cb570cb2 --- /dev/null +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -0,0 +1,482 @@ +#include +#include +#include +#include + +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { + +// Only checks that swaps are correct, doesn't measure how good they are +class SwapCorrectnessTester { + public: + // Perform the raw swaps for comparison. 
+ void reset(const vector& raw_swaps) { + m_final_tracker.reset(); + for (const auto& swap : raw_swaps) { + (void)m_final_tracker.do_vertex_swap(swap); + } + m_number_of_raw_swaps = raw_swaps.size(); + } + + void require_equal_permutations(const SwapList& swap_list) const { + m_tracker_to_change.reset(); + size_t num_swaps = 0; + for (auto id = swap_list.front_id(); id; id = swap_list.next(id.value())) { + m_tracker_to_change.do_vertex_swap(swap_list.at(id.value())); + ++num_swaps; + } + REQUIRE(num_swaps == swap_list.size()); + REQUIRE(m_tracker_to_change.equal_vertex_permutation_from_swaps( + m_final_tracker)); + REQUIRE(m_number_of_raw_swaps >= num_swaps); + } + + private: + size_t m_number_of_raw_swaps = 0; + DynamicTokenTracker m_final_tracker; + mutable DynamicTokenTracker m_tracker_to_change; +}; + +// As well as correctness, also checks that optimisation passes +// do actually perform quite well. +class SwapTester { + public: + SwapTester() { + m_optimisation_functions.reserve(5); + + m_optimisation_functions.emplace_back([](const vector& raw_swaps, + SwapList& list, + SwapListOptimiser& optimiser) { + for (const Swap& swap : raw_swaps) { + optimiser.push_back(list, swap); + } + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.optimise_pass_with_zero_travel(list); + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.optimise_pass_with_frontward_travel(list); + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.optimise_pass_with_token_tracking(list); + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.full_optimise(list); + }); + reset_counters(); + } + + void reset_counters() { + // Also includes the number of raw swaps, + // and the number of tests. + m_counts.resize(m_optimisation_functions.size() + 2); + std::fill(m_counts.begin(), m_counts.end(), 0); + } + + void test(const vector& raw_swaps) { + ++m_counts[0]; + m_counts[1] += raw_swaps.size(); + m_correctness_tester.reset(raw_swaps); + + for (size_t ii = 0; ii < m_optimisation_functions.size(); ++ii) { + m_swap_list.clear(); + if (ii != 0) { + for (const auto& swap : raw_swaps) { + m_swap_list.push_back(swap); + } + } + m_optimisation_functions[ii](raw_swaps, m_swap_list, m_optimiser); + m_correctness_tester.require_equal_permutations(m_swap_list); + m_counts[ii + 2] += m_swap_list.size(); + } + } + + std::string get_final_result() const { + std::stringstream ss; + ss << "[ " << m_counts[0] << " tests; swap counts:"; + for (size_t ii = 1; ii < m_counts.size(); ++ii) { + ss << " " << m_counts[ii] << " "; + } + ss << "]"; + return ss.str(); + } + + private: + vector< + std::function&, SwapList&, SwapListOptimiser&)>> + m_optimisation_functions; + + vector m_counts; + + SwapList m_swap_list; + SwapListOptimiser m_optimiser; + SwapCorrectnessTester m_correctness_tester; + size_t number_of_tests; +}; +} // namespace + +SCENARIO("Random swaps are optimised") { + RNG rng; + SwapTester tester; + vector raw_swaps; + const vector num_vertices{5, 10, 20}; + + // We will multiply the number of possible distinct swaps + // by these numbers, then divide by 100, to determine how many swaps + // to generate for the test. + const vector percentages{50, 100, 200, 500}; + + // Not necessarily contiguous. 
+ std::set vertices_set; + + for (size_t number_of_vertices : num_vertices) { + const size_t possible_swaps = + (number_of_vertices * (number_of_vertices - 1)) / 2; + for (auto percent : percentages) { + const size_t num_swaps = (possible_swaps * percent) / 100; + vertices_set.clear(); + + for (size_t ii = 0; ii < number_of_vertices; ++ii) { + vertices_set.insert(ii); + } + const vector vertices(vertices_set.cbegin(), vertices_set.cend()); + for (int test_counter = 0; test_counter < 1; ++test_counter) { + INFO( + "test_counter=" << test_counter << ", number_of_vertices=" + << number_of_vertices << ", percent=" << percent); + + for (size_t jj = 0; jj < num_swaps; ++jj) { + const auto v1 = rng.get_element(vertices); + auto v2 = v1; + while (v1 == v2) { + v2 = rng.get_element(vertices); + } + raw_swaps.emplace_back(get_swap(v1, v2)); + } + tester.test(raw_swaps); + } + } + } + CHECK( + tester.get_final_result() == + "[ 12 tests; swap counts: 5636 5256 4976 4976 264 268 ]"); +} + +namespace { +// The above test just generates completely random swap sequences +// on N vertices. For a more realistic sequence, we try choosing them +// from a smaller list of possible swaps +// (thus, representing swaps on an incomplete graph). +// This might be more realistic. +struct EdgesGenerator { + std::set swaps_set; + size_t approx_num_vertices = 5; + size_t approx_num_edges = 10; + size_t percentage_to_add_new_vertex = 50; + + vector get_swaps(RNG& rng, size_t& actual_num_vertices) { + actual_num_vertices = 2; + swaps_set.clear(); + swaps_set.insert(get_swap(0, 1)); + + for (size_t counter = 10 * approx_num_edges; counter > 0; --counter) { + if (actual_num_vertices >= approx_num_vertices || + swaps_set.size() >= approx_num_edges) { + break; + } + bool add_new_vertex = rng.check_percentage(percentage_to_add_new_vertex); + if (!add_new_vertex) { + const auto current_edges = swaps_set.size(); + for (int edge_attempt = 10; edge_attempt > 0; --edge_attempt) { + const auto v1 = rng.get_size_t(actual_num_vertices - 1); + const auto v2 = rng.get_size_t(actual_num_vertices - 1); + if (v1 != v2) { + swaps_set.insert(get_swap(v1, v2)); + if (current_edges != swaps_set.size()) { + break; + } + } + } + if (current_edges != swaps_set.size()) { + continue; + } + add_new_vertex = true; + } + if (add_new_vertex) { + swaps_set.insert(get_swap( + rng.get_size_t(actual_num_vertices - 1), actual_num_vertices)); + ++actual_num_vertices; + continue; + } + } + vector result{swaps_set.cbegin(), swaps_set.cend()}; + return result; + } +}; + +struct ManyTestsRunner { + SwapTester tester; + + EdgesGenerator swaps_generator; + vector possible_swaps; + size_t actual_num_vertices; + vector raw_swaps; + + void run( + RNG& rng, const vector& approx_num_vertices, + const vector& approx_num_edges_percentages, + const vector& swap_length_percentages, + size_t num_tests_per_parameter_list) { + for (auto approx_nv : approx_num_vertices) { + swaps_generator.approx_num_vertices = approx_nv; + for (auto approx_nep : approx_num_edges_percentages) { + swaps_generator.approx_num_edges = + approx_nv / 2 + (approx_nv * (approx_nv - 1) * approx_nep) / 200; + for (size_t num_graphs = 0; num_graphs < 1; ++num_graphs) { + possible_swaps = swaps_generator.get_swaps(rng, actual_num_vertices); + for (auto slp : swap_length_percentages) { + const size_t swap_list_length = + 1 + (possible_swaps.size() * slp) / 100; + for (size_t test_counter = 0; + test_counter < num_tests_per_parameter_list; ++test_counter) { + raw_swaps.clear(); + for (size_t nn = 0; nn < 
swap_list_length; ++nn) { + raw_swaps.push_back(rng.get_element(possible_swaps)); + } + tester.test(raw_swaps); + } + } + } + } + } + } +}; +} // namespace + +SCENARIO("More realistic swap sequences") { + RNG rng; + const size_t num_tests_per_parameter_list = 10; + + // How many edges should we aim for, as a rough percentage of + // the total number n(n-1)/2 of possibilities? + const vector approx_num_edges_percentages{5, 10, 20, 30, 40, 80}; + + // How long should the swap length be, as a percentage of the + // total possible number of swaps? + const vector swap_length_percentages{50, 100, 200}; + + { + const vector approx_num_vertices{5, 8}; + ManyTestsRunner runner; + runner.run( + rng, approx_num_vertices, approx_num_edges_percentages, + swap_length_percentages, num_tests_per_parameter_list); + CHECK( + runner.tester.get_final_result() == + "[ 360 tests; swap counts: 3160 2380 2104 2104 1396 1406 ]"); + } + { + const vector approx_num_vertices{10, 12, 14}; + ManyTestsRunner runner; + runner.run( + rng, approx_num_vertices, approx_num_edges_percentages, + swap_length_percentages, num_tests_per_parameter_list); + CHECK( + runner.tester.get_final_result() == + "[ 540 tests; swap counts: 10370 9048 7580 7580 5180 5216 ]"); + } + { + const vector approx_num_vertices{30, 35, 40}; + ManyTestsRunner runner; + runner.run( + rng, approx_num_vertices, approx_num_edges_percentages, + swap_length_percentages, num_tests_per_parameter_list); + CHECK( + runner.tester.get_final_result() == + "[ 540 tests; swap counts: 38900 37626 30944 30944 24714 " + "24720 ]"); + } +} + +// If we perform a sequence of swaps, then again in reverse order, +// (and thus, make a palindrome), it ALWAYS equals the identity permutation. +// (Of course, odd-length palindromes like "(0,1)" do NOT give the identity!) +// It seems "obvious" that zero-travel and frontwards-travel passes +// should optimise (even-length) palindromes to zero; but is it actually true?! +// Token-tracking passes definitely do NOT, but counterexamples are rare. +// (Even though token-tracking IRREDUCIBILITY can be shown to be +// STRICTLY STRONGER than zero-travel or frontwards-travel IRREDUCIBILITY!) 
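+// For example, with S = (0,1) (1,2), the palindrome S + Reverse(S) is
+// (0,1) (1,2) (1,2) (0,1): the middle pair cancels, leaving (0,1) (0,1),
+// which also cancels. Any even-length palindrome reduces to the identity
+// this way, by repeatedly cancelling the equal adjacent pair in the middle.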
+SCENARIO("Trivial swap list reversed order optimisation; pass comparisons") { + vector possible_swaps; + const unsigned num_vertices = 4; + + for (unsigned ii = 0; ii < num_vertices; ++ii) { + for (unsigned jj = ii + 1; jj < num_vertices; ++jj) { + possible_swaps.push_back(get_swap(ii, jj)); + } + } + vector raw_swaps; + SwapList swaps; + SwapListOptimiser optimiser; + + const auto push_back_swaps = [&raw_swaps, &swaps]() { + swaps.fast_clear(); + for (auto swap : raw_swaps) { + swaps.push_back(swap); + } + }; + + const auto concatenate_reversed_swaps = [&raw_swaps, &swaps, + &push_back_swaps]() { + push_back_swaps(); + for (auto citer = raw_swaps.crbegin(); citer != raw_swaps.crend(); + ++citer) { + swaps.push_back(*citer); + } + }; + + size_t simple_travel_equals_token_tracking_count = 0; + size_t simple_travel_beats_token_tracking_count = 0; + size_t simple_travel_beaten_by_token_tracking_count = 0; + size_t full_optimise_fully_reduces_palindrome = 0; + size_t full_optimise_does_not_destroy_palindrome = 0; + size_t token_tracking_pass_fully_reduces_palindrome = 0; + size_t token_tracking_pass_does_not_destroy_palindrome = 0; + + RNG rng; + + for (int test_counter = 0; test_counter < 1000; ++test_counter) { + if (raw_swaps.size() > 20) { + raw_swaps.clear(); + } + raw_swaps.push_back(rng.get_element(possible_swaps)); + + concatenate_reversed_swaps(); + optimiser.optimise_pass_with_zero_travel(swaps); + CHECK(swaps.size() == 0); + + concatenate_reversed_swaps(); + optimiser.optimise_pass_with_frontward_travel(swaps); + CHECK(swaps.size() == 0); + + concatenate_reversed_swaps(); + optimiser.optimise_pass_with_token_tracking(swaps); + if (swaps.size() == 0) { + ++token_tracking_pass_fully_reduces_palindrome; + } else { + ++token_tracking_pass_does_not_destroy_palindrome; + } + + concatenate_reversed_swaps(); + optimiser.full_optimise(swaps); + if (swaps.size() == 0) { + ++full_optimise_fully_reduces_palindrome; + } else { + ++full_optimise_does_not_destroy_palindrome; + } + + push_back_swaps(); + optimiser.optimise_pass_with_zero_travel(swaps); + const auto zero_travel_reduced_size = swaps.size(); + + push_back_swaps(); + optimiser.optimise_pass_with_frontward_travel(swaps); + const auto frontward_travel_reduced_size = swaps.size(); + CHECK(zero_travel_reduced_size == frontward_travel_reduced_size); + + push_back_swaps(); + optimiser.optimise_pass_with_token_tracking(swaps); + + const auto token_tracking_reduced_size = swaps.size(); + if (token_tracking_reduced_size == zero_travel_reduced_size) { + ++simple_travel_equals_token_tracking_count; + } else { + if (token_tracking_reduced_size < zero_travel_reduced_size) { + ++simple_travel_beaten_by_token_tracking_count; + } else { + ++simple_travel_beats_token_tracking_count; + } + } + } + CHECK(simple_travel_equals_token_tracking_count == 299); + CHECK(simple_travel_beaten_by_token_tracking_count == 697); + CHECK(simple_travel_beats_token_tracking_count == 4); + CHECK(full_optimise_fully_reduces_palindrome == 1000); + CHECK(full_optimise_does_not_destroy_palindrome == 0); + CHECK(token_tracking_pass_fully_reduces_palindrome == 976); + CHECK(token_tracking_pass_does_not_destroy_palindrome == 24); +} + +SCENARIO("specific swap list optimisation counterexamples") { + SwapList swaps; + SwapListOptimiser optimiser; + // Illustrates that general-travel irreducible does NOT imply token-tracking + // irreducible. 
(Of course, we haven't IMPLEMENTED general-travel reduction, + // but we can PROVE that general-travel irreducibility is equivalent to + // zero-travel and frontwards-travel irreducibility). + swaps.push_back(get_swap(0, 1)); + swaps.push_back(get_swap(0, 2)); + swaps.push_back(get_swap(0, 1)); + swaps.push_back(get_swap(0, 2)); + optimiser.optimise_pass_with_zero_travel(swaps); + CHECK(swaps.size() == 4); + optimiser.optimise_pass_with_frontward_travel(swaps); + CHECK(swaps.size() == 4); + optimiser.optimise_pass_with_token_tracking(swaps); + CHECK(str(swaps) == " (0,2) (0,1) "); + + // Are palindromes S + Reverse(S) ALWAYS optimised to an empty list by zero + // travel or frontwards travel passes? Seems so, but how to prove it? (We know + // that for IRREDUCIBILITY, zero-travel, frontwards-travel, general-travel + // give equivalent concepts, and token-tracking gives a strictly stronger + // pass, i.e. token-tracking irreducible => zero-travel irreducible, etc. but + // NOT conversely. But we have no such results for sequence reduction, and + // this counterexample illustrates that). + const vector swap_sequence_palindrome{ + {1, 2}, {1, 3}, {0, 2}, {1, 3}, {1, 3}, {2, 3}, {0, 1}, {1, 2}, + {0, 1}, {0, 2}, {1, 2}, {0, 3}, {0, 3}, {1, 2}, {0, 2}, {0, 1}, + {1, 2}, {0, 1}, {2, 3}, {1, 3}, {1, 3}, {0, 2}, {1, 3}, {1, 2}}; + REQUIRE(swap_sequence_palindrome.size() % 2 == 0); + for (unsigned ii = 0; ii < swap_sequence_palindrome.size(); ++ii) { + REQUIRE( + swap_sequence_palindrome[ii] == + swap_sequence_palindrome[swap_sequence_palindrome.size() - 1 - ii]); + } + + const auto push_back_swaps = [&swaps, &swap_sequence_palindrome]() { + swaps.fast_clear(); + for (auto swap : swap_sequence_palindrome) { + swaps.push_back(swap); + } + }; + + push_back_swaps(); + optimiser.optimise_pass_with_frontward_travel(swaps); + CHECK(swaps.size() == 0); + + push_back_swaps(); + optimiser.optimise_pass_with_zero_travel(swaps); + CHECK(swaps.size() == 0); + + push_back_swaps(); + optimiser.optimise_pass_with_token_tracking(swaps); + CHECK(str(swaps) == " (0,3) (0,1) (2,3) (0,2) (1,3) (1,2) "); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp new file mode 100644 index 0000000000..c9434d37d0 --- /dev/null +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -0,0 +1,287 @@ +#include + +#include "TestUtils/PartialTsaTesting.hpp" +#include "TestUtils/ProblemGeneration.hpp" +#include "TokenSwapping/CyclesPartialTsa.hpp" +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" +#include "TokenSwapping/TrivialTSA.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct Tester { + vector messages_full_trivial_tsa; + vector messages_partial_trivial_tsa; + vector messages_cycles_tsa_0; + mutable RNG rng; + mutable TrivialTSA trivial_tsa; + mutable CyclesPartialTsa cycles_tsa; + + void run_test( + const Architecture& arch, const vector& problems, + size_t index) const { + trivial_tsa.set(TrivialTSA::Options::FULL_TSA); + CHECK( + run_tests( + arch, problems, rng, trivial_tsa, RequiredTsaProgress::FULL) == + messages_full_trivial_tsa[index]); + + trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); + CHECK( + run_tests( + arch, problems, rng, trivial_tsa, RequiredTsaProgress::NONZERO) == + 
messages_partial_trivial_tsa[index]); + + CHECK( + run_tests(arch, problems, rng, cycles_tsa, RequiredTsaProgress::NONE) == + messages_cycles_tsa_0[index]); + } +}; + +} // namespace +SCENARIO("Partial TSA: Rings") { + const vector problem_messages{ + "[Ring3: 51582: v3 i1 f100 s1: 100 problems; 135 tokens]", + "[Ring4: 51481: v4 i1 f100 s1: 100 problems; 178 tokens]", + "[Ring5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[Ring6: 51528: v6 i1 f100 s1: 100 problems; 270 tokens]", + "[Ring7: 51496: v7 i1 f100 s1: 100 problems; 318 tokens]", + "[Ring30: 51633: v30 i1 f100 s1: 100 problems; 1473 tokens]"}; + + Tester tester; + tester.messages_full_trivial_tsa = { + "[TSA=Trivial FULL PF=RiverFlow\n" + "135 tokens; 69 total L; 55 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 82]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "178 tokens; 156 total L; 144 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 33, max 100, av 69]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "224 tokens; 260 total L; 273 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 33, max 100, av 59]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "270 tokens; 405 total L; 464 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 30, max 100, av 52]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "318 tokens; 511 total L; 596 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 30, max 100, av 49]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "1473 tokens; 10908 total L; 16873 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 26, max 50, av 36]"}; + + tester.messages_partial_trivial_tsa = { + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "135 tokens; 69 total L; 49 swaps.\n" + "L-decr %: min 50, max 100, av 97.\n" + "Power %: min 50, max 100, av 82]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "178 tokens; 156 total L; 101 swaps.\n" + "L-decr %: min 20, max 100, av 80.\n" + "Power %: min 16, max 100, av 67]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "224 tokens; 260 total L; 129 swaps.\n" + "L-decr %: min 12, max 100, av 61.\n" + "Power %: min 16, max 100, av 58]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "270 tokens; 405 total L; 186 swaps.\n" + "L-decr %: min 7, max 100, av 49.\n" + "Power %: min 8, max 100, av 52]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "318 tokens; 511 total L; 196 swaps.\n" + "L-decr %: min 7, max 100, av 39.\n" + "Power %: min 5, max 100, av 50]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "1473 tokens; 10908 total L; 273 swaps.\n" + "L-decr %: min 0, max 50, av 2.\n" + "Power %: min 1, max 100, av 46]"}; + + tester.messages_cycles_tsa_0 = { + "[TSA=Cycles PF=RiverFlow\n" + "135 tokens; 69 total L; 55 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 82]", + + "[TSA=Cycles PF=RiverFlow\n" + "178 tokens; 156 total L; 119 swaps.\n" + "L-decr %: min 0, max 100, av 97.\n" + "Power %: min 0, max 100, av 72]", + + "[TSA=Cycles PF=RiverFlow\n" + "224 tokens; 260 total L; 194 swaps.\n" + "L-decr %: min 0, max 100, av 94.\n" + "Power %: min 0, max 100, av 65]", + + "[TSA=Cycles PF=RiverFlow\n" + "270 tokens; 405 total L; 294 swaps.\n" + "L-decr %: min 0, max 100, av 92.\n" + "Power %: min 0, max 100, av 63]", + + "[TSA=Cycles PF=RiverFlow\n" + "318 tokens; 511 total L; 357 swaps.\n" + "L-decr %: min 0, max 100, av 89.\n" + "Power %: min 0, max 100, av 62]", + + "[TSA=Cycles PF=RiverFlow\n" + "1473 tokens; 10908 total L; 6344 swaps.\n" + "L-decr %: min 42, max 100, 
av 79.\n" + "Power %: min 50, max 86, av 61]"}; + + std::string arch_name; + const ProblemGenerator00 generator; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + auto num_vertices = index + 3; + if (num_vertices == 8) { + num_vertices = 30; + } + const RingArch arch(num_vertices); + arch_name = "Ring" + std::to_string(num_vertices); + + // OK to reuse RNG, as it's reset before each problem. + tester.rng.set_seed(); + const auto problems = generator.get_problems( + arch_name, arch, tester.rng, problem_messages[index]); + + tester.run_test(arch, problems, index); + } +} + +SCENARIO("Partial TSA: Fully connected") { + const vector problem_messages{ + "[K5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[K9: 51665: v9 i1 f100 s1: 100 problems; 416 tokens]"}; + + Tester tester; + tester.messages_full_trivial_tsa = { + "[TSA=Trivial FULL PF=RiverFlow\n" + "224 tokens; 172 total L; 149 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 64]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "416 tokens; 378 total L; 342 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 56]", + }; + + tester.messages_partial_trivial_tsa = { + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "224 tokens; 172 total L; 84 swaps.\n" + "L-decr %: min 25, max 100, av 74.\n" + "Power %: min 50, max 100, av 63]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "416 tokens; 378 total L; 98 swaps.\n" + "L-decr %: min 12, max 100, av 46.\n" + "Power %: min 50, max 100, av 58]"}; + + tester.messages_cycles_tsa_0 = { + "[TSA=Cycles PF=RiverFlow\n" + "224 tokens; 172 total L; 149 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 64]", + + "[TSA=Cycles PF=RiverFlow\n" + "416 tokens; 378 total L; 342 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 56]"}; + + std::string arch_name; + const ProblemGenerator00 generator; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + auto num_vertices = 4 * index + 5; + const FullyConnected arch(num_vertices); + arch_name = "K" + std::to_string(num_vertices); + tester.rng.set_seed(); + const auto problems = generator.get_problems( + arch_name, arch, tester.rng, problem_messages[index]); + + tester.run_test(arch, problems, index); + } +} + +SCENARIO("Partial TSA: Square grid") { + const vector> grid_parameters = { + {2, 3, 3}, {5, 5, 3}}; + const vector problem_messages{ + "[Grid(2,3,3): 51683: v18 i1 f100 s1: 100 problems; 865 tokens]", + "[Grid(5,5,3): 51573: v75 i1 f100 s1: 100 problems; 3751 tokens]"}; + + Tester tester; + tester.messages_full_trivial_tsa = { + "[TSA=Trivial FULL PF=RiverFlow\n" + "865 tokens; 1921 total L; 2592 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 31, max 100, av 41]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "3751 tokens; 15297 total L; 23212 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 28, max 50, av 36]"}; + + tester.messages_partial_trivial_tsa = { + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "865 tokens; 1921 total L; 153 swaps.\n" + "L-decr %: min 2, max 100, av 12.\n" + "Power %: min 8, max 100, av 48]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "3751 tokens; 15297 total L; 193 swaps.\n" + "L-decr %: min 0, max 25, av 1.\n" + "Power %: min 5, max 100, av 44]"}; + + tester.messages_cycles_tsa_0 = { + "[TSA=Cycles PF=RiverFlow\n" + "865 tokens; 1921 total L; 1425 swaps.\n" + "L-decr %: min 60, max 100, av 95.\n" + "Power %: min 46, max 100, av 61]", + + 
"[TSA=Cycles PF=RiverFlow\n" + "3751 tokens; 15297 total L; 11464 swaps.\n" + "L-decr %: min 83, max 100, av 95.\n" + "Power %: min 50, max 79, av 59]"}; + + const ProblemGenerator00 generator; + + for (size_t index = 0; index < grid_parameters.size(); ++index) { + const auto& parameters = grid_parameters[index]; + const SquareGrid arch(parameters[0], parameters[1], parameters[2]); + std::stringstream ss; + ss << "Grid(" << parameters[0] << "," << parameters[1] << "," + << parameters[2] << ")"; + + tester.rng.set_seed(); + const auto problems = generator.get_problems( + ss.str(), arch, tester.rng, problem_messages[index]); + + tester.run_test(arch, problems, index); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp new file mode 100644 index 0000000000..6a8d6472aa --- /dev/null +++ b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp @@ -0,0 +1,243 @@ +#include +#include +#include + +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/VectorListHybrid.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +typedef VectorListHybrid List; +typedef List::ID ID; + +SCENARIO("Reversing a list") { + RNG rng; + List list; + auto copied_elements = list.to_vector(); + vector copied_elements_again; + REQUIRE(copied_elements.empty()); + for (int count = 0; count < 1000; ++count) { + const unsigned x = rng.get_size_t(1000); + switch (x % 7) { + // Should we delete? + case 0: + if (list.size() != 0) { + const auto id = list.front_id().value(); + list.erase(id); + } + break; + case 1: + if (list.size() != 0) { + const auto id = list.back_id().value(); + list.erase(id); + } + break; + case 2: + list.clear(); + break; + default: + break; + } + if (x % 2 == 0) { + list.push_front(x); + } else { + list.push_back(x); + } + copied_elements = list.to_vector(); + list.reverse(); + copied_elements_again = list.to_vector(); + std::reverse(copied_elements.begin(), copied_elements.end()); + REQUIRE(copied_elements == copied_elements_again); + } +} + +// Write the contents to a string for testing, possibly including IDs. +static std::string repr(const List& list, bool include_ids) { + std::stringstream ss; + ss << "[size " << list.size() << ": "; + for (auto id_opt = list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = list.next(id); + ss << list.at(id) << " "; + } + if (include_ids) { + ss << "; ids: "; + for (auto id_opt = list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = list.next(id); + ss << id << " "; + } + } + ss << "]"; + return ss.str(); +} + +// In "operations", a positive number p means go to position p % size() in the +// list, and insert a number there. A negative number n means do the same thing +// with abs(n) % size(), but erase instead of insert. Returns a string +// representing the elements which were erased/inserted, again using negative +// numbers to denote erasure. Does NOT give the IDs. +static std::string perform_operation( + const vector& operations, List& list, unsigned& next_element) { + std::stringstream ss; + ss << "["; + for (int position_code : operations) { + REQUIRE(position_code != 0); + const auto size = list.size(); + if (size == 0) { + if (position_code > 0) { + list.push_back(next_element); + ss << "new: " << next_element << " "; + next_element += 100; + continue; + } + // Cannot erase from an empty list! 
+ ss << "; "; + continue; + } + // It's nonempty. + unsigned position = std::abs(position_code); + position %= size; + ID id = list.front_id().value(); + for (unsigned nn = 0; nn < position; ++nn) { + const auto next_id = list.next(id); + REQUIRE(next_id); + id = next_id.value(); + } + ss << "at " << position << ": "; + if (position_code > 0) { + ss << next_element << " "; + const ID new_id = list.insert_after(id); + list.at(new_id) = next_element; + next_element += 100; + continue; + } + ss << "-" << list.at(id) << " "; + list.erase(id); + } + ss << "]"; + return ss.str(); +} + +namespace { +struct Result { + std::string initial_op_str; + std::string list_str_after_one_op; + std::string list_str_after_one_op_without_ids; + std::string op_str_after_two_ops; + std::string list_str_after_two_ops; + std::string list_str_after_two_ops_without_ids; + + Result(const vector& operations, List& list, unsigned& next_element) + : initial_op_str(perform_operation(operations, list, next_element)), + list_str_after_one_op(repr(list, true)), + list_str_after_one_op_without_ids(repr(list, false)), + op_str_after_two_ops(perform_operation(operations, list, next_element)), + list_str_after_two_ops(repr(list, true)), + list_str_after_two_ops_without_ids(repr(list, false)) {} + + void check_equal_contents_without_ids(const Result& other) const { + CHECK(initial_op_str == other.initial_op_str); + CHECK( + list_str_after_one_op_without_ids == + other.list_str_after_one_op_without_ids); + CHECK(op_str_after_two_ops == other.op_str_after_two_ops); + CHECK( + list_str_after_two_ops_without_ids == + other.list_str_after_two_ops_without_ids); + } + + void check_equal_id_data(const Result& other) const { + CHECK(list_str_after_one_op == other.list_str_after_one_op); + CHECK(list_str_after_two_ops == other.list_str_after_two_ops); + } + + void check_different_id_data(const Result& other) const { + CHECK(list_str_after_one_op != other.list_str_after_one_op); + CHECK(list_str_after_two_ops != other.list_str_after_two_ops); + } +}; +} // namespace + +// We want to test that lists have equal or different contents, +// with/without clear/fast_clear, etc. +// The same sequences of logical operations +// (erase, insert, etc.) applied to a new list or a fast_cleared list might NOT +// preserve IDs, but should preserve the contents. With clear(), it should ALSO +// preserve IDs. +SCENARIO("Inserting, erasing, clearing tests") { + // These are just some random numbers. + const vector operations{-10, -4, 1, 3, -8, 2, -2, -3, -5, -9, + -6, -2, -7, 2, 5, -8, 6, -4, 10, 7, + -10, -1, 5, 6, 9, 1, 4, -7, -1, 4, + 8, -9, 8, -3, -5, -6, 9, 3, 7, 10}; + + List list; + unsigned next_element = 999; + const Result result_with_new_object(operations, list, next_element); + + // Also test clearing empty objects. + { + // bits 00 mean do nothing, 01 means clear, 11 means fast clear. 
+ const vector clear_options{ + 0, // nothing, + 0x5, // clear, clear, + 0x7, // fast clear, clear, + 0xD, // clear, fast clear, + 0xF, // fast clear, fast clear + 0x15 // clear, clear, clear + }; + for (unsigned option : clear_options) { + List empty_list; + unsigned copy = option; + while (copy != 0) { + const unsigned code = copy & 0x3; + copy >>= 2; + switch (code) { + case 1: + empty_list.clear(); + break; + case 3: + empty_list.fast_clear(); + break; + default: { + REQUIRE(false); + } + } + } + next_element = 999; + const Result result_with_empty_list(operations, empty_list, next_element); + result_with_empty_list.check_equal_contents_without_ids( + result_with_new_object); + result_with_empty_list.check_equal_id_data(result_with_new_object); + } + } + // Now repeat the operations. + list.clear(); + { + INFO("second time, cleared list"); + next_element = 999; + const Result result_with_cleared_object(operations, list, next_element); + result_with_cleared_object.check_equal_contents_without_ids( + result_with_new_object); + result_with_cleared_object.check_equal_id_data(result_with_new_object); + } + list.fast_clear(); + { + INFO("third time, fast cleared list"); + next_element = 999; + const Result result_with_cleared_object(operations, list, next_element); + result_with_cleared_object.check_equal_contents_without_ids( + result_with_new_object); + result_with_cleared_object.check_different_id_data(result_with_new_object); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp new file mode 100644 index 0000000000..e905b6a024 --- /dev/null +++ b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp @@ -0,0 +1,295 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/VectorListHybridSkeleton.hpp" + +; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// A slower implementation of VectorListHybridSkeleton +// using linked lists +struct VLHS_tester_reimplementation { + // Each node will contain the index it was given. 
+ mutable std::list data; + + void clear() { data.clear(); } + size_t size() const { return data.size(); } + size_t front_index() const { return data.front(); } + size_t back_index() const { return data.back(); } + + std::list::iterator find(size_t index) { + for (auto iter = data.begin(); iter != data.end(); ++iter) { + if (*iter == index) { + return iter; + } + } + throw std::runtime_error( + std::string("index ") + std::to_string(index) + " not found"); + } + + std::list::const_iterator find(size_t index) const { + for (auto citer = data.cbegin(); citer != data.cend(); ++citer) { + if (*citer == index) { + return citer; + } + } + throw std::runtime_error( + std::string("index ") + std::to_string(index) + " not found"); + } + + std::optional next(size_t index) const { + auto citer = find(index); + ++citer; + if (citer == data.cend()) { + return {}; + } + return *citer; + } + + std::optional previous(size_t index) const { + auto citer = find(index); + --citer; + if (citer != data.cend()) { + return {}; + } + return *citer; + } + + void erase(size_t index) { + auto iter = find(index); + data.erase(iter); + } + + void insert_for_empty_list(size_t new_index) { + REQUIRE(data.empty()); + data.push_front(new_index); + } + + void insert_after(size_t index, size_t new_index) { + auto iter = find(index); + // We can only insert BEFORE an iter with STL + ++iter; + if (iter == data.end()) { + // We were at the back. + data.push_back(new_index); + return; + } + // We're now after the node, we insert before + data.insert(iter, new_index); + } + + void insert_before(size_t index, size_t new_index) { + auto iter = find(index); + data.insert(iter, new_index); + } +}; + +// Keep track of which indices have currently not yet been erased +struct ValidIndices { + std::set indices; + + bool contains(size_t index) const { return indices.count(index) != 0; } + void check_and_insert_new_index(size_t index) { + REQUIRE(index != VectorListHybridSkeleton::get_invalid_index()); + REQUIRE(indices.count(index) == 0); + indices.insert(index); + } + + void check_and_erase_index(size_t index) { + REQUIRE(indices.count(index) != 0); + indices.erase(index); + } + + size_t get_index(RNG& rng) const { + REQUIRE(!indices.empty()); + auto citer = indices.cbegin(); + for (size_t ii = rng.get_size_t(indices.size() - 1); ii != 0; --ii) { + ++citer; + } + return *citer; + } +}; + +void require_equal_indices( + size_t index, const std::optional& index_opt) { + if (index == VectorListHybridSkeleton::get_invalid_index()) { + REQUIRE(!index_opt); + return; + } + REQUIRE(index_opt); + REQUIRE(index_opt.value() == index); +} + +bool are_equal( + const VectorListHybridSkeleton& vlhs, + const VLHS_tester_reimplementation& tester, + const ValidIndices& valid_indices) { + if (vlhs.size() != tester.size()) { + return false; + } + if (vlhs.size() == 0) { + return true; + } + auto citer = tester.data.cbegin(); + for (auto index = vlhs.front_index(); + index != VectorListHybridSkeleton::get_invalid_index(); + index = vlhs.next(index)) { + if (*citer != index) { + return false; + } + REQUIRE(valid_indices.contains(index)); + ++citer; + } + REQUIRE(citer == tester.data.cend()); + REQUIRE(*tester.data.cbegin() == vlhs.front_index()); + REQUIRE(*tester.data.crbegin() == vlhs.back_index()); + return true; +} + +SCENARIO("Random operations preserve VLHS") { + RNG rng; + VLHS_tester_reimplementation tester; + VectorListHybridSkeleton vlhs; + ValidIndices valid_indices; + REQUIRE(are_equal(vlhs, tester, valid_indices)); + + for (int op_counter = 0; 
op_counter < 10000; ++op_counter) { + INFO("counter=" << op_counter); + if (op_counter + 1 % 100 == 0) { + vlhs.clear(); + tester.clear(); + valid_indices.indices.clear(); + } + bool should_insert = rng.check_percentage(50); + if (valid_indices.indices.empty()) { + should_insert = true; + } + if (valid_indices.indices.size() > 10) { + should_insert = false; + } + if (should_insert) { + if (valid_indices.indices.empty()) { + vlhs.insert_for_empty_list(); + const auto new_index = vlhs.front_index(); + REQUIRE(new_index == vlhs.back_index()); + tester.insert_for_empty_list(new_index); + valid_indices.check_and_insert_new_index(new_index); + } else { + const auto index = valid_indices.get_index(rng); + const bool insert_after = rng.check_percentage(50); + + if (insert_after) { + vlhs.insert_after(index); + const auto new_index = vlhs.next(index); + tester.insert_after(index, new_index); + valid_indices.check_and_insert_new_index(new_index); + } else { + vlhs.insert_before(index); + const auto new_index = vlhs.previous(index); + tester.insert_before(index, new_index); + valid_indices.check_and_insert_new_index(new_index); + } + } + } else { + // We erase instead. + const auto index = valid_indices.get_index(rng); + vlhs.erase(index); + tester.erase(index); + valid_indices.check_and_erase_index(index); + } + REQUIRE(are_equal(vlhs, tester, valid_indices)); + } +} + +static std::string get_fixed_ops_str(bool do_fast_clear) { + std::stringstream ss; + VectorListHybridSkeleton vlhs; + ss << vlhs.debug_str(); + vlhs.insert_for_empty_list(); + ss << "\nInsert: " << vlhs.debug_str(); + vlhs.insert_after(vlhs.front_index()); + ss << "\nInsert after front: " << vlhs.debug_str(); + const auto id = vlhs.front_index(); + vlhs.insert_before(id); + ss << "\nInsert before front: " << vlhs.debug_str(); + vlhs.insert_after(id); + ss << "\nInsert after " << id << ": " << vlhs.debug_str(); + vlhs.erase(3); + ss << "\nErase 3: " << vlhs.debug_str(); + if (do_fast_clear) { + vlhs.fast_clear(); + ss << "\nFast clear: " << vlhs.debug_str(); + } else { + vlhs.clear(); + ss << "\nClear: " << vlhs.debug_str(); + } + vlhs.insert_for_empty_list(); + ss << "\nInsert: " << vlhs.debug_str(); + return ss.str(); +} + +SCENARIO("Some fixed ops") { + // The only difference should be in the internal link values. 
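+  // A minimal illustrative sketch (assuming only the VectorListHybridSkeleton
+  // calls already used above): clear() relinks freed indices in ascending
+  // order, so the next insertion reuses index 0 again, whereas fast_clear()
+  // pushes the still-active links onto the front of the deleted list, so a
+  // different index may be reused first. The expected strings below encode
+  // exactly this difference.
+  {
+    VectorListHybridSkeleton sketch;
+    sketch.insert_for_empty_list();             // creates index 0
+    sketch.insert_after(sketch.front_index());  // creates index 1
+    sketch.clear();
+    sketch.insert_for_empty_list();
+    CHECK(sketch.front_index() == 0);
+  }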
+ const std::string common_prefix{ + "VLHS: size 0, front NULL back NULL, del.front NULL\n" + "Active links: forward []\n" + "Backward ()\n" + "Del.links: {}\n" + "Insert: VLHS: size 1, front 0 back 0, del.front NULL\n" + "Active links: forward [0->]\n" + "Backward (0->)\n" + "Del.links: {}\n" + "Insert after front: VLHS: size 2, front 0 back 1, del.front NULL\n" + "Active links: forward [0->1->]\n" + "Backward (1->0->)\n" + "Del.links: {}\n" + "Insert before front: VLHS: size 3, front 2 back 1, del.front NULL\n" + "Active links: forward [2->0->1->]\n" + "Backward (1->0->2->)\n" + "Del.links: {}\n" + "Insert after 0: VLHS: size 4, front 2 back 1, del.front NULL\n" + "Active links: forward [2->0->3->1->]\n" + "Backward (1->3->0->2->)\n" + "Del.links: {}\n" + "Erase 3: VLHS: size 3, front 2 back 1, del.front 3\n" + "Active links: forward [2->0->1->]\n" + "Backward (1->0->2->)\n" + "Del.links: {3->}\n"}; + const std::string fast_clear_suffix{ + "Fast clear: VLHS: size 0, front NULL back NULL, del.front 2\n" + "Active links: forward []\n" + "Backward ()\n" + "Del.links: {2->0->1->3->}\n" + "Insert: VLHS: size 1, front 2 back 2, del.front 0\n" + "Active links: forward [2->]\n" + "Backward (2->)\n" + "Del.links: {0->1->3->}"}; + const std::string clear_suffix{ + "Clear: VLHS: size 0, front NULL back NULL, del.front 0\n" + "Active links: forward []\n" + "Backward ()\n" + "Del.links: {0->1->2->3->}\n" + "Insert: VLHS: size 1, front 0 back 0, del.front 1\n" + "Active links: forward [0->]\n" + "Backward (0->)\n" + "Del.links: {1->2->3->}"}; + const auto fast_clear_str = get_fixed_ops_str(true); + CHECK(fast_clear_str == common_prefix + fast_clear_suffix); + + const auto clear_str = get_fixed_ops_str(false); + CHECK(clear_str == common_prefix + clear_suffix); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_main_entry_functions.cpp b/tket/tests/TokenSwapping/test_main_entry_functions.cpp new file mode 100644 index 0000000000..9711b2afcd --- /dev/null +++ b/tket/tests/TokenSwapping/test_main_entry_functions.cpp @@ -0,0 +1,99 @@ +#include +#include + +#include "TokenSwapping/RNG.hpp" +#include "TokenSwapping/main_entry_functions.hpp" + +; +using std::vector; + +// Detailed algorithmic checks with quantitative benchmarks +// are done elsewhere, so this is really just checking conversion. + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("main entry function for TSA") { + // Will summarise relevant data, so that we can see any changes. + std::stringstream problem_ss; + + const SquareGrid arch(3, 4, 2); + const auto nodes = arch.get_all_uids_vec(); + const auto edges = arch.get_connections_vec(); + problem_ss << nodes.size() << " nodes; " << edges.size() << " edges."; + + // The value is the set of all neighbouring nodes. 
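+  // (Used below to check that every swap returned by get_swaps joins two
+  // adjacent nodes of the architecture.)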
+ std::map> allowed_edges_map; + for (auto [n1, n2] : edges) { + REQUIRE(n1 != n2); + allowed_edges_map[n1].insert(n2); + allowed_edges_map[n2].insert(n1); + } + + // Key: a node Value: its original position in "nodes" + std::map original_vertex_indices; + for (size_t ii = 0; ii < nodes.size(); ++ii) { + original_vertex_indices[nodes[ii]] = ii; + } + RNG rng_to_generate_swaps; + auto nodes_copy = nodes; + rng_to_generate_swaps.do_shuffle(nodes_copy); + const auto node_final_positions = nodes_copy; + + problem_ss << " Node mapping:"; + NodeMapping node_mapping; + for (size_t ii = 0; ii < nodes.size(); ++ii) { + problem_ss << "\ni=" << ii << " : " << node_final_positions[ii].repr() + << " -> " << nodes[ii].repr(); + node_mapping[node_final_positions[ii]] = nodes[ii]; + } + CHECK( + problem_ss.str() == + "24 nodes; 46 edges. Node mapping:\n" + "i=0 : gridNode[0, 0, 0] -> gridNode[0, 0, 0]\n" + "i=1 : gridNode[0, 3, 0] -> gridNode[0, 0, 1]\n" + "i=2 : gridNode[2, 1, 0] -> gridNode[0, 1, 0]\n" + "i=3 : gridNode[0, 1, 1] -> gridNode[0, 1, 1]\n" + "i=4 : gridNode[2, 2, 0] -> gridNode[0, 2, 0]\n" + "i=5 : gridNode[1, 1, 1] -> gridNode[0, 2, 1]\n" + "i=6 : gridNode[0, 0, 1] -> gridNode[0, 3, 0]\n" + "i=7 : gridNode[0, 3, 1] -> gridNode[0, 3, 1]\n" + "i=8 : gridNode[1, 3, 0] -> gridNode[1, 0, 0]\n" + "i=9 : gridNode[1, 0, 0] -> gridNode[1, 0, 1]\n" + "i=10 : gridNode[2, 2, 1] -> gridNode[1, 1, 0]\n" + "i=11 : gridNode[0, 1, 0] -> gridNode[1, 1, 1]\n" + "i=12 : gridNode[2, 0, 1] -> gridNode[1, 2, 0]\n" + "i=13 : gridNode[1, 2, 1] -> gridNode[1, 2, 1]\n" + "i=14 : gridNode[1, 3, 1] -> gridNode[1, 3, 0]\n" + "i=15 : gridNode[1, 0, 1] -> gridNode[1, 3, 1]\n" + "i=16 : gridNode[2, 0, 0] -> gridNode[2, 0, 0]\n" + "i=17 : gridNode[2, 1, 1] -> gridNode[2, 0, 1]\n" + "i=18 : gridNode[0, 2, 1] -> gridNode[2, 1, 0]\n" + "i=19 : gridNode[1, 2, 0] -> gridNode[2, 1, 1]\n" + "i=20 : gridNode[0, 2, 0] -> gridNode[2, 2, 0]\n" + "i=21 : gridNode[1, 1, 0] -> gridNode[2, 2, 1]\n" + "i=22 : gridNode[2, 3, 0] -> gridNode[2, 3, 0]\n" + "i=23 : gridNode[2, 3, 1] -> gridNode[2, 3, 1]"); + + // Calculate swaps to enact the permutation. + const auto node_swaps = get_swaps(arch, node_mapping); + // This will hopefully decrease over time + // as we improve the algorithm. + CHECK(node_swaps.size() == 29); + + // Go back to the original configuration, and perform the swaps. 
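+  // A minimal illustrative sketch of the same entry point on a two-node
+  // architecture (assuming only the calls already used in this test): a
+  // single transposition of adjacent nodes must need at least one swap.
+  {
+    const SquareGrid tiny_arch(1, 2, 1);
+    const auto tiny_nodes = tiny_arch.get_all_uids_vec();
+    NodeMapping tiny_mapping;
+    tiny_mapping[tiny_nodes[0]] = tiny_nodes[1];
+    tiny_mapping[tiny_nodes[1]] = tiny_nodes[0];
+    const auto tiny_swaps = get_swaps(tiny_arch, tiny_mapping);
+    REQUIRE(!tiny_swaps.empty());
+  }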
+ nodes_copy = nodes; + for (const auto& node_swap : node_swaps) { + REQUIRE(allowed_edges_map.at(node_swap.first).count(node_swap.second) != 0); + const auto index1 = original_vertex_indices.at(node_swap.first); + const auto index2 = original_vertex_indices.at(node_swap.second); + REQUIRE(index1 != index2); + std::swap(nodes_copy[index1], nodes_copy[index2]); + } + REQUIRE(nodes_copy == node_final_positions); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 66399cc3c1..bd8a27fbed 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -39,6 +39,33 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/Graphs/test_UIDConnectivity.cpp ${TKET_TESTS_DIR}/Graphs/test_ArticulationPoints.cpp ${TKET_TESTS_DIR}/Graphs/test_TreeSearch.cpp + ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedCompleteSolutions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedSwapSequences.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/NeighboursFromEdges.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/PermutationTestUtils.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/BestTsaTester.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DecodedProblemData.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/FullTsaTesting.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ProblemGeneration.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/TestStatsStructs.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_BestTsaFixedSwapSequences.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_DistancesFromArchitecture.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_FullTsa.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_main_entry_functions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_RiverFlowPathFinder.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_SwapList.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_SwapListOptimiser.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_VariousPartialTsa.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_VectorListHybrid.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_VectorListHybridSkeleton.cpp ${TKET_TESTS_DIR}/test_PauliString.cpp ${TKET_TESTS_DIR}/Ops/test_ClassicalOps.cpp ${TKET_TESTS_DIR}/Ops/test_Expression.cpp From 8ba7968b35d0fa07460e280fc4e3845eb2ab02f5 Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Mon, 25 Oct 2021 19:42:09 +0100 Subject: [PATCH 003/146] Add "MappingManager" class and port older routing solution (#95) * Copy code from private repository * Add binders for mapping module * Adding mapping module to setup.py * Add shared_ptr to Architecture subclasses in binder file * Port python test for mapping module --- pytket/CMakeLists.txt | 1 + pytket/binders/mapping.cpp | 80 ++ pytket/binders/routing.cpp | 25 +- pytket/pytket/mapping/__init__.py | 14 + pytket/setup.py | 1 + pytket/tests/mapping_test.py | 195 +++++ tket/src/Architecture/Architectures.cpp | 43 +- tket/src/Architecture/Architectures.hpp | 
8 +- tket/src/CMakeLists.txt | 10 + tket/src/Mapping/LexiRoute.cpp | 580 +++++++++++++++ tket/src/Mapping/LexiRoute.hpp | 192 +++++ .../src/Mapping/LexicographicalComparison.cpp | 131 ++++ .../src/Mapping/LexicographicalComparison.hpp | 87 +++ tket/src/Mapping/MappingFrontier.cpp | 472 ++++++++++++ tket/src/Mapping/MappingFrontier.hpp | 142 ++++ tket/src/Mapping/MappingManager.cpp | 75 ++ tket/src/Mapping/MappingManager.hpp | 50 ++ tket/src/Mapping/RoutingMethod.hpp | 52 ++ tket/src/Mapping/RoutingMethodCircuit.cpp | 68 ++ tket/src/Mapping/RoutingMethodCircuit.hpp | 58 ++ tket/tests/test_LexiRoute.cpp | 441 +++++++++++ tket/tests/test_LexicographicalComparison.cpp | 211 ++++++ tket/tests/test_MappingFrontier.cpp | 695 ++++++++++++++++++ tket/tests/test_MappingManager.cpp | 37 + tket/tests/test_RoutingMethod.cpp | 191 +++++ tket/tests/tkettestsfiles.cmake | 5 + 26 files changed, 3850 insertions(+), 14 deletions(-) create mode 100644 pytket/binders/mapping.cpp create mode 100644 pytket/pytket/mapping/__init__.py create mode 100644 pytket/tests/mapping_test.py create mode 100644 tket/src/Mapping/LexiRoute.cpp create mode 100644 tket/src/Mapping/LexiRoute.hpp create mode 100644 tket/src/Mapping/LexicographicalComparison.cpp create mode 100644 tket/src/Mapping/LexicographicalComparison.hpp create mode 100644 tket/src/Mapping/MappingFrontier.cpp create mode 100644 tket/src/Mapping/MappingFrontier.hpp create mode 100644 tket/src/Mapping/MappingManager.cpp create mode 100644 tket/src/Mapping/MappingManager.hpp create mode 100644 tket/src/Mapping/RoutingMethod.hpp create mode 100644 tket/src/Mapping/RoutingMethodCircuit.cpp create mode 100644 tket/src/Mapping/RoutingMethodCircuit.hpp create mode 100644 tket/tests/test_LexiRoute.cpp create mode 100644 tket/tests/test_LexicographicalComparison.cpp create mode 100644 tket/tests/test_MappingFrontier.cpp create mode 100644 tket/tests/test_MappingManager.cpp create mode 100644 tket/tests/test_RoutingMethod.cpp diff --git a/pytket/CMakeLists.txt b/pytket/CMakeLists.txt index 079495b427..6fa21d2996 100644 --- a/pytket/CMakeLists.txt +++ b/pytket/CMakeLists.txt @@ -60,3 +60,4 @@ build_module(pauli binders/pauli.cpp) build_module(logging binders/logging.cpp) build_module(utils_serialization binders/utils_serialization.cpp) build_module(tailoring binders/tailoring.cpp) +build_module(mapping binders/mapping.cpp) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp new file mode 100644 index 0000000000..5e8cee5dcc --- /dev/null +++ b/pytket/binders/mapping.cpp @@ -0,0 +1,80 @@ +#include +#include +#include + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/RoutingMethodCircuit.hpp" + +namespace py = pybind11; + +namespace tket { +PYBIND11_MODULE(mapping, m) { + py::class_( + m, "RoutingMethod", + "Parent class for RoutingMethod, for inheritance purposes only, not for " + "usage.") + .def(py::init<>()); + + py::class_( + m, "RoutingMethodCircuit", + "The RoutingMethod class captures a method for partially mapping logical" + "subcircuits to physical operations as permitted by some architecture. 
" + "Ranked RoutingMethod objects are used by the MappingManager to route " + "whole circuits.") + .def( + py::init< + const std::function( + const Circuit&, const ArchitecturePtr&)>&, + const std::function, + unsigned, unsigned>(), + "Constructor for a routing method defined by partially routing " + "subcircuits.\n\n:param route_subcircuit: A function declaration " + "that given a Circuit and Architecture object, returns a tuple " + "containing a new modified circuit, the initial logical to physical " + "qubit mapping of the modified circuit and the permutation of " + "'logical to physical qubit mapping given operations in the " + "modified circuit\n:param check_subcircuit: A function declaration " + "that given a Circuit and Architecture object, returns a bool " + "stating whether the given method can modify the " + "given circuit\n:param max_size: The maximum number of gates " + "permitted in a subcircuit\n:param max_depth: The maximum permitted " + "depth of a subcircuit.", + py::arg("route_subcircuit"), py::arg("check_subcircuit"), + py::arg("max_size"), py::arg("max_depth")); + + py::class_( + m, "LexiRouteRoutingMethod", + "Defines a RoutingMethod object for mapping circuits that uses the " + "Lexicographical Comparison approach outlined in arXiv:1902.08091.") + .def( + py::init(), + "LexiRoute constructor.\n\n:param lookahead: Maximum depth of " + "lookahead " + "employed when picking SWAP for purpose of logical to physical " + "mapping."); + + py::class_( + m, "MappingManager", + "Defined by a pytket Architecture object, maps Circuit logical Qubits " + "to Physically permitted Architecture qubits. Mapping is completed by " + "sequential routing (full or partial) of subcircuits. Custom method for " + "routing (full or partial) of subcircuits can be defined in python " + "layer.") + .def( + py::init(), + "MappingManager constructor.\n\n:param architecture: pytket " + "Architecure object MappingManager object defined by.", + py::arg("architecture")) + .def( + "route_circuit", &MappingManager::route_circuit, + "Maps from given logical circuit to physical circuit. Modification " + "defined by route_subcircuit, but typically this proceeds by " + "insertion of SWAP gates that permute logical qubits on physical " + "qubits. \n\n:param circuit: pytket circuit to be mapped" + "\n:param routing_methods: Ranked methods to use for routing " + "subcircuits. 
In given order, each method is sequentially checked " + "for viability, with the first viable method being used.", + py::arg("circuit"), py::arg("routing_methods")); +} +} // namespace tket \ No newline at end of file diff --git a/pytket/binders/routing.cpp b/pytket/binders/routing.cpp index b35ecb1add..9043d9c5c4 100644 --- a/pytket/binders/routing.cpp +++ b/pytket/binders/routing.cpp @@ -93,7 +93,7 @@ std::pair route( } PYBIND11_MODULE(routing, m) { - py::class_( + py::class_( m, "Architecture", "The base architecture class, describing the connectivity of " "qubits on a device.") @@ -106,11 +106,26 @@ PYBIND11_MODULE(routing, m) { "operations", py::arg("connections")) .def( - py::init>>(), + py::init> &>(), "The constructor for an architecture with connectivity " "between qubits.\n\n:param connections: A list of pairs " "representing Nodes that can perform two-qubit operations", py::arg("connections")) + .def( + "__repr__", + [](const Architecture &arc) { + return ""; + }) + .def( + "get_distance", &Architecture::get_distance, + "given two nodes in Architecture, " + "returns distance between them", + py::arg("node_0"), py::arg("node_1")) + .def( + "get_adjacent_nodes", &Architecture::get_neighbour_uids, + "given a node, returns adjacent nodes in Architecture.", + py::arg("node")) .def_property_readonly( "nodes", &Architecture::get_all_uids_vec, "Returns all nodes of architecture as UnitID objects. ") @@ -147,7 +162,7 @@ PYBIND11_MODULE(routing, m) { "equal " "if they have the same set of nodes and the same connections between " "nodes."); - py::class_( + py::class_, Architecture>( m, "SquareGrid", "Architecture class for qubits arranged in a square lattice of " "given number of rows and columns. Qubits are arranged with qubits " @@ -196,7 +211,7 @@ PYBIND11_MODULE(routing, m) { ", columns=" + std::to_string(arc.get_columns()) + ", layers=" + std::to_string(arc.get_layers()) + ">"; }); - py::class_( + py::class_, Architecture>( m, "FullyConnected", "Architecture class for number of qubits connected to every other " "qubit.") @@ -210,7 +225,7 @@ PYBIND11_MODULE(routing, m) { return ""; }); - py::class_( + py::class_, Architecture>( m, "RingArch", "Architecture class for number of qubits arranged in a ring.") .def( diff --git a/pytket/pytket/mapping/__init__.py b/pytket/pytket/mapping/__init__.py new file mode 100644 index 0000000000..faddc73127 --- /dev/null +++ b/pytket/pytket/mapping/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019-2021 Cambridge Quantum Computing +# +# You may not use this file except in compliance with the Licence. +# You may obtain a copy of the Licence in the LICENCE file accompanying +# these documents or at: +# +# https://cqcl.github.io/pytket/build/html/licence.html +"""The mapping module provides an API to interact with the + tket :py:class:`MappingManager` suite, with methods for + mapping logical circuits to physical circuits and for + defining custom routing solutions. 
This module is provided + in binary form during the PyPI installation.""" + +from pytket._tket.mapping import * # type: ignore diff --git a/pytket/setup.py b/pytket/setup.py index c3b5569d54..e836e40500 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -195,6 +195,7 @@ def build_extension(self, ext): "routing", "transform", "tailoring", + "mapping", ] diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py new file mode 100644 index 0000000000..c4e670b2a5 --- /dev/null +++ b/pytket/tests/mapping_test.py @@ -0,0 +1,195 @@ +# Copyright 2019-2021 Cambridge Quantum Computing +# +# You may not use this file except in compliance with the Licence. +# You may obtain a copy of the Licence in the LICENCE file accompanying +# these documents or at: +# +# https://cqcl.github.io/pytket/build/html/licence.html + +from pytket.mapping import MappingManager, RoutingMethodCircuit, LexiRouteRoutingMethod # type: ignore +from pytket.routing import Architecture # type: ignore +from pytket import Circuit, OpType +from pytket.circuit import Node # type: ignore +from typing import Tuple, Dict + + +# simple deterministic heuristic used for testing purposes +def route_subcircuit_func( + circuit: Circuit, architecture: Architecture +) -> Tuple[Circuit, Dict[Node, Node], Dict[Node, Node]]: + # make a replacement circuit with identical unitds + replacement_circuit = Circuit() + for qb in circuit.qubits: + replacement_circuit.add_qubit(qb) + for bit in circuit.bits: + replacement_circuit.add_bit(bit) + + # "place" unassigned logical qubits to physical qubits + unused_nodes = list(architecture.nodes) + relabelling_map = dict() + + for qb in circuit.qubits: + for n in unused_nodes: + if n == qb: + unused_nodes.remove(n) + + for qb in circuit.qubits: + if qb not in set(architecture.nodes): + relabelling_map[qb] = unused_nodes.pop() + else: + # this is so later architecture.get_distance works + # yes this is obviously bad, buts its a simple test heuristic so who cares?! + relabelling_map[qb] = qb + + replacement_circuit.rename_units(relabelling_map) + permutation_map = dict() + for qb in replacement_circuit.qubits: + permutation_map[qb] = qb + + # very simple heuristic -> the first time a physically invalid CX is encountered, add a SWAP + # then add all remaining gates as is (using updated physical mapping) + # note this is possible as routing accepts partially solved problems + max_swaps = 1 + swaps_added = 0 + for com in circuit.get_commands(): + rp_qubits = [permutation_map[relabelling_map[q]] for q in com.qubits] + if len(com.qubits) > 2: + raise ValueError("Command must have maximum two qubits") + if len(com.qubits) == 1: + replacement_circuit.add_gate(com.op.type, rp_qubits) + if len(com.qubits) == 2: + if swaps_added < max_swaps: + # get node references for some stupid reason... + # theres some stupid casting issue + # just passing qubits didnt work.. 
whatever + for n in architecture.nodes: + if n == rp_qubits[0]: + n0 = n + if n == rp_qubits[1]: + n1 = n + distance = architecture.get_distance(n0, n1) + if distance > 1: + for node in architecture.get_adjacent_nodes(n0): + if architecture.get_distance( + node, n1 + ) < architecture.get_distance(n0, n1): + replacement_circuit.add_gate( + OpType.SWAP, [rp_qubits[0], node] + ) + + permutation_map[rp_qubits[0]] = node + permutation_map[node] = rp_qubits[0] + rp_qubits = [ + permutation_map[relabelling_map[q]] for q in com.qubits + ] + swaps_added = swaps_added + 1 + break + + replacement_circuit.add_gate(com.op.type, rp_qubits) + + return (replacement_circuit, relabelling_map, permutation_map) + + +def check_subcircuit_func_true(circuit: Circuit, architecture: Architecture) -> bool: + return True + + +def check_subcircuit_func_false(circuit: Circuit, architecture: Architecture) -> bool: + return False + + +def test_LexiRouteRoutingMethod() -> None: + test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2) + nodes = [Node("test", 0), Node("test", 1), Node("test", 2)] + test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]]) + test_mm = MappingManager(test_a) + test_mm.route_circuit(test_c, [LexiRouteRoutingMethod(50)]) + routed_commands = test_c.get_commands() + + assert routed_commands[0].op.type == OpType.CX + assert routed_commands[0].qubits == [nodes[1], nodes[0]] + assert routed_commands[1].op.type == OpType.CX + assert routed_commands[1].qubits == [nodes[1], nodes[2]] + assert routed_commands[2].op.type == OpType.SWAP + assert routed_commands[2].qubits == [nodes[2], nodes[1]] + assert routed_commands[3].op.type == OpType.CX + assert routed_commands[3].qubits == [nodes[0], nodes[1]] + + +def test_RoutingMethodCircuit_custom() -> None: + test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2) + nodes = [Node("test", 0), Node("test", 1), Node("test", 2)] + test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]]) + + test_mm = MappingManager(test_a) + test_mm.route_circuit( + test_c, + [RoutingMethodCircuit(route_subcircuit_func, check_subcircuit_func_true, 5, 5)], + ) + routed_commands = test_c.get_commands() + + assert routed_commands[0].op.type == OpType.CX + assert routed_commands[0].qubits == [nodes[0], nodes[1]] + assert routed_commands[1].op.type == OpType.SWAP + assert routed_commands[1].qubits == [nodes[0], nodes[1]] + assert routed_commands[2].op.type == OpType.CX + assert routed_commands[2].qubits == [nodes[1], nodes[2]] + assert routed_commands[3].op.type == OpType.SWAP + assert routed_commands[3].qubits == [nodes[0], nodes[1]] + assert routed_commands[4].op.type == OpType.CX + assert routed_commands[4].qubits == [nodes[1], nodes[2]] + + +def test_RoutingMethodCircuit_custom_list() -> None: + test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2) + nodes = [Node("test", 0), Node("test", 1), Node("test", 2)] + test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]]) + + test_mm = MappingManager(test_a) + test_mm.route_circuit( + test_c, + [ + RoutingMethodCircuit( + route_subcircuit_func, check_subcircuit_func_false, 5, 5 + ), + LexiRouteRoutingMethod(50), + ], + ) + routed_commands = test_c.get_commands() + + assert routed_commands[0].op.type == OpType.CX + assert routed_commands[0].qubits == [nodes[1], nodes[0]] + assert routed_commands[1].op.type == OpType.CX + assert routed_commands[1].qubits == [nodes[1], nodes[2]] + assert routed_commands[2].op.type == OpType.SWAP + assert routed_commands[2].qubits == [nodes[2], nodes[1]] + assert routed_commands[3].op.type == 
OpType.CX + assert routed_commands[3].qubits == [nodes[0], nodes[1]] + + test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2) + test_mm.route_circuit( + test_c, + [ + RoutingMethodCircuit( + route_subcircuit_func, check_subcircuit_func_true, 5, 5 + ), + LexiRouteRoutingMethod(50), + ], + ) + routed_commands = test_c.get_commands() + assert routed_commands[0].op.type == OpType.CX + assert routed_commands[0].qubits == [nodes[0], nodes[1]] + assert routed_commands[1].op.type == OpType.SWAP + assert routed_commands[1].qubits == [nodes[0], nodes[1]] + assert routed_commands[2].op.type == OpType.CX + assert routed_commands[2].qubits == [nodes[1], nodes[2]] + assert routed_commands[3].op.type == OpType.SWAP + assert routed_commands[3].qubits == [nodes[0], nodes[1]] + assert routed_commands[4].op.type == OpType.CX + assert routed_commands[4].qubits == [nodes[1], nodes[2]] + + +if __name__ == "__main__": + test_LexiRouteRoutingMethod() + test_RoutingMethodCircuit_custom() + test_RoutingMethodCircuit_custom_list() diff --git a/tket/src/Architecture/Architectures.cpp b/tket/src/Architecture/Architectures.cpp index 3494cedf1f..b0af8db1c2 100644 --- a/tket/src/Architecture/Architectures.cpp +++ b/tket/src/Architecture/Architectures.cpp @@ -24,8 +24,37 @@ namespace tket { +// basic implementation that works off same prior assumptions +// TODO: Update this for more mature systems of multi-qubit gates +bool Architecture::valid_operation( + /*const OpType& optype, */ const std::vector& uids) const { + if (uids.size() == + 1) { // TODO: for simple case here this should probably not pass if + // uid_exists[uids[0]] == FALSE, but should be fine for now? + return true; + } else if (uids.size() == 2) { + if (this->uid_exists(uids[0]) && this->uid_exists(uids[1]) && + (this->connection_exists(uids[0], uids[1]) || + this->connection_exists(uids[1], uids[0]))) { + return true; + } + } else if (uids.size() == 3) { + bool con_0_exists = + (this->connection_exists(uids[0], uids[1]) || + this->connection_exists(uids[1], uids[0])); + bool con_1_exists = + (this->connection_exists(uids[2], uids[1]) || + this->connection_exists(uids[1], uids[2])); + if (this->uid_exists(uids[0]) && this->uid_exists(uids[1]) && + this->uid_exists(uids[2]) && con_0_exists && con_1_exists) { + return true; + } + } + return false; +} + Architecture Architecture::create_subarch( - const std::vector& subarc_nodes) { + const std::vector& subarc_nodes) const { Architecture subarc(subarc_nodes); for (auto [u1, u2] : get_connections_vec()) { if (subarc.uid_exists(u1) && subarc.uid_exists(u2)) { @@ -117,15 +146,13 @@ node_set_t Architecture::remove_worst_nodes(unsigned num) { } static bool lexicographical_comparison( - const std::vector& dist1, - const std::vector& dist2) { + const std::vector& dist1, const std::vector& dist2) { return std::lexicographical_compare( dist1.begin(), dist1.end(), dist2.begin(), dist2.end()); } int tri_lexicographical_comparison( - const std::vector& dist1, - const std::vector& dist2) { + const std::vector& dist1, const std::vector& dist2) { // add assertion that these are the same size distance vectors auto start_dist1 = dist1.cbegin(); auto start_dist2 = dist2.cbegin(); @@ -168,7 +195,7 @@ std::optional Architecture::find_worst_node( return std::nullopt; } - std::vector worst_distances, temp_distances; + std::vector worst_distances, temp_distances; Node worst_node = *bad_nodes.begin(); worst_distances = get_distances(worst_node); for (Node temp_node : bad_nodes) { @@ -180,9 +207,9 @@ std::optional 
Architecture::find_worst_node( worst_node = temp_node; worst_distances = temp_distances; } else if (distance_comp == -1) { - std::vector temp_distances_full = + std::vector temp_distances_full = original_arch.get_distances(temp_node); - std::vector worst_distances_full = + std::vector worst_distances_full = original_arch.get_distances(worst_node); if (lexicographical_comparison( temp_distances_full, worst_distances_full)) { diff --git a/tket/src/Architecture/Architectures.hpp b/tket/src/Architecture/Architectures.hpp index d192d3e68a..fbd5b7f458 100644 --- a/tket/src/Architecture/Architectures.hpp +++ b/tket/src/Architecture/Architectures.hpp @@ -29,6 +29,7 @@ #include "Utils/MatrixAnalysis.hpp" #include "Utils/TketLog.hpp" #include "Utils/UnitID.hpp" + namespace tket { using dist_vec = graphs::dist_vec; @@ -69,9 +70,12 @@ class Architecture : public graphs::UIDConnectivity { node_set_t get_articulation_points() const; node_set_t get_articulation_points(const Architecture &subarc) const; + bool valid_operation( + /*const OpType& optype, */ const std::vector &uids) const; + /* returns new Architecture that is generated by a subset of nodes of `this` */ - Architecture create_subarch(const std::vector &nodes); + Architecture create_subarch(const std::vector &nodes) const; // Returns vectors of nodes which correspond to lines of specified length std::vector get_lines( @@ -165,6 +169,8 @@ class SquareGrid : public Architecture { unsigned layers; }; +typedef std::shared_ptr ArchitecturePtr; + int tri_lexicographical_comparison( const dist_vec &dist1, const dist_vec &dist2); diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index e747dd8798..03ed3e3333 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -74,6 +74,7 @@ set(TKET_OPS_DIR ${TKET_SRC_DIR}/Ops) set(TKET_GATE_DIR ${TKET_SRC_DIR}/Gate) set(TKET_SIMULATION_DIR ${TKET_SRC_DIR}/Simulation) set(TKET_ROUTING_DIR ${TKET_SRC_DIR}/Routing) +set(TKET_MAPPING_DIR ${TKET_SRC_DIR}/Mapping) set(TKET_TOKEN_SWAPPING_DIR ${TKET_SRC_DIR}/TokenSwapping) set(TKET_TRANSFORM_DIR ${TKET_SRC_DIR}/Transformations) set(TKET_CHARACTERISATION_DIR ${TKET_SRC_DIR}/Characterisation) @@ -225,6 +226,15 @@ set(TKET_SOURCES # Architecture ${TKET_ARCHITECTURE_DIR}/Architectures.cpp + + # Mapping + ${TKET_MAPPING_DIR}/MappingFrontier.cpp + ${TKET_MAPPING_DIR}/RoutingMethodCircuit.cpp + ${TKET_MAPPING_DIR}/MappingManager.cpp + ${TKET_MAPPING_DIR}/LexicographicalComparison.cpp + ${TKET_MAPPING_DIR}/LexiRoute.cpp + + # Architecture Aware Synthesis ${TKET_AAS_DIR}/Path.cpp ${TKET_AAS_DIR}/SteinerTree.cpp diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp new file mode 100644 index 0000000000..2e5b70d55c --- /dev/null +++ b/tket/src/Mapping/LexiRoute.cpp @@ -0,0 +1,580 @@ +#include "Mapping/LexiRoute.hpp" + +#include "Mapping/MappingFrontier.hpp" + +namespace tket { + +LexiRoute::LexiRoute( + const ArchitecturePtr& _architecture, + std::shared_ptr& _mapping_frontier) + : architecture_(_architecture), mapping_frontier_(_mapping_frontier) { + this->set_interacting_uids(); + // set initial logical->physical labelling + for (const Qubit& qb : this->mapping_frontier_->circuit_.all_qubits()) { + this->labelling_.insert({qb, qb}); + Node n(qb); + // store which Node have been asigned to Circuit already + if (this->architecture_->uid_exists(n)) { + this->assigned_nodes_.insert(n); + } + } +} + +void LexiRoute::merge_with_ancilla(const UnitID& merge, const UnitID& ancilla) { + // get output and input vertices + Vertex merge_v_in = 
this->mapping_frontier_->circuit_.get_in(merge); + Vertex merge_v_out = this->mapping_frontier_->circuit_.get_out(merge); + Vertex ancilla_v_out = this->mapping_frontier_->circuit_.get_out(ancilla); + // find source vertex & port of merge_v_out + // output vertex, so can assume single edge + Edge merge_out_edge = + this->mapping_frontier_->circuit_.get_nth_out_edge(merge_v_in, 0); + Edge ancilla_in_edge = + this->mapping_frontier_->circuit_.get_nth_in_edge(ancilla_v_out, 0); + // Find port number + port_t merge_target_port = + this->mapping_frontier_->circuit_.get_target_port(merge_out_edge); + port_t ancilla_source_port = + this->mapping_frontier_->circuit_.get_source_port(ancilla_in_edge); + // Find vertices + Vertex merge_v_target = + this->mapping_frontier_->circuit_.target(merge_out_edge); + Vertex ancilla_v_source = + this->mapping_frontier_->circuit_.source(ancilla_in_edge); + + // remove and replace edges + this->mapping_frontier_->circuit_.remove_edge(merge_out_edge); + this->mapping_frontier_->circuit_.remove_edge(ancilla_in_edge); + this->mapping_frontier_->circuit_.add_edge( + {ancilla_v_source, ancilla_source_port}, + {merge_v_target, merge_target_port}, EdgeType::Quantum); + + // instead of manually updating all boundaries, we change which output vertex + // the qubit paths to + Edge merge_in_edge = + this->mapping_frontier_->circuit_.get_nth_in_edge(merge_v_out, 0); + port_t merge_source_port = + this->mapping_frontier_->circuit_.get_source_port(merge_in_edge); + Vertex merge_v_source = + this->mapping_frontier_->circuit_.source(merge_in_edge); + + this->mapping_frontier_->circuit_.remove_edge(merge_in_edge); + this->mapping_frontier_->circuit_.add_edge( + {merge_v_source, merge_source_port}, {ancilla_v_out, 0}, + EdgeType::Quantum); + + // remove empty vertex wire, relabel dag vertices + this->mapping_frontier_->circuit_.dag[merge_v_in].op = + get_op_ptr(OpType::noop); + this->mapping_frontier_->circuit_.dag[merge_v_out].op = + get_op_ptr(OpType::noop); + this->mapping_frontier_->circuit_.remove_vertex( + merge_v_in, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + this->mapping_frontier_->circuit_.remove_vertex( + merge_v_out, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + + // Can now just erase "merge" qubit from the circuit + this->mapping_frontier_->circuit_.boundary.get().erase(merge); + + if (this->mapping_frontier_->circuit_.unit_bimaps_.initial) { + this->mapping_frontier_->circuit_.unit_bimaps_.initial->right.erase(merge); + } + if (this->mapping_frontier_->circuit_.unit_bimaps_.final) { + this->mapping_frontier_->circuit_.unit_bimaps_.final->right.erase(merge); + } +} + +bool LexiRoute::assign_at_distance( + const UnitID& assignee, const Node& root, unsigned distances) { + node_set_t valid_nodes; + for (const Node& neighbour : + this->architecture_->uids_at_distance(root, distances)) { + if (this->assigned_nodes_.find(neighbour) == this->assigned_nodes_.end() || + this->mapping_frontier_->ancilla_nodes_.find(neighbour) != + this->mapping_frontier_->ancilla_nodes_.end()) { + valid_nodes.insert(neighbour); + } + } + if (valid_nodes.size() == 1) { + auto it = valid_nodes.begin(); + if (this->mapping_frontier_->ancilla_nodes_.find(*it) != + this->mapping_frontier_->ancilla_nodes_.end()) { + // => node *it is already present in circuit, but as an ancilla + this->merge_with_ancilla(assignee, *it); + this->mapping_frontier_->ancilla_nodes_.erase(*it); + this->labelling_.erase(*it); + this->labelling_[assignee] = *it; + } else { + 
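+      // *it is a free node (not yet assigned to the circuit), so claim it
+      // directly for the assignee and record it as assigned.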
this->labelling_[assignee] = *it; + this->assigned_nodes_.insert(*it); + } + return true; + } + if (valid_nodes.size() > 1) { + auto it = valid_nodes.begin(); + lexicographical_distances_t winning_distances = + this->architecture_->get_distances(*it); + Node preserved_node = *it; + ++it; + for (; it != valid_nodes.end(); ++it) { + lexicographical_distances_t comparison_distances = + this->architecture_->get_distances(*it); + if (comparison_distances < winning_distances) { + preserved_node = *it; + winning_distances = comparison_distances; + } + } + if (this->mapping_frontier_->ancilla_nodes_.find(preserved_node) != + this->mapping_frontier_->ancilla_nodes_.end()) { + // => node *it is already present in circuit, but as an ancilla + this->merge_with_ancilla(assignee, preserved_node); + this->mapping_frontier_->ancilla_nodes_.erase(preserved_node); + this->labelling_.erase(preserved_node); + this->labelling_[assignee] = preserved_node; + } else { + // add ancilla case + this->labelling_[assignee] = preserved_node; + this->assigned_nodes_.insert(preserved_node); + } + return true; + } + return false; +} + +bool LexiRoute::update_labelling() { + // iterate through interacting qubits, assigning them to an Architecture Node + // if they aren't already + bool relabelled = false; + for (const auto& pair : this->interacting_uids_) { + bool uid_0_exist = + this->architecture_->uid_exists(Node(this->labelling_[pair.first])); + bool uid_1_exist = + this->architecture_->uid_exists(Node(this->labelling_[pair.second])); + if (!uid_0_exist || !uid_1_exist) { + relabelled = true; + } + if (!uid_0_exist && !uid_1_exist) { + // Place one on free unassigned qubit + // Then place second later + // condition => No ancilla qubits assigned, so don't checl + if (this->assigned_nodes_.size() == 0) { + // find nodes with best averaged distance to other nodes + // place it there... 
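+        // Among the maximum-degree nodes, keep the one whose distance vector
+        // (from Architecture::get_distances) compares lexicographically
+        // smallest; ties keep the node encountered first.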
+ std::set max_degree_uids = this->architecture_->max_degree_uids(); + auto it = max_degree_uids.begin(); + lexicographical_distances_t winning_distances = + this->architecture_->get_distances(*it); + Node preserved_node = Node(*it); + ++it; + for (; it != max_degree_uids.end(); ++it) { + lexicographical_distances_t comparison_distances = + this->architecture_->get_distances(*it); + if (comparison_distances < winning_distances) { + preserved_node = Node(*it); + winning_distances = comparison_distances; + } + } + this->labelling_[pair.first] = preserved_node; + this->assigned_nodes_.insert(preserved_node); + uid_0_exist = true; + // given best node, do something + } else { + auto root_it = this->assigned_nodes_.begin(); + while (!uid_0_exist && root_it != this->assigned_nodes_.end()) { + Node root = *root_it; + uid_0_exist = this->assign_at_distance(pair.first, root, 1); + ++root_it; + } + if (!uid_0_exist) { + throw LexiRouteError( + "Unable to assign physical qubit - no free qubits remaining."); + } + } + } + if (!uid_0_exist && uid_1_exist) { + Node root(this->labelling_[pair.second]); + for (unsigned k = 1; k <= this->architecture_->get_diameter(); k++) { + uid_0_exist = this->assign_at_distance(pair.first, root, k); + if (uid_0_exist) { + break; + } + } + if (!uid_0_exist) { + throw LexiRouteError( + "Unable to assign physical qubit - no free qubits remaining."); + } + } + if (uid_0_exist && !uid_1_exist) { + Node root(this->labelling_[pair.first]); + for (unsigned k = 1; k <= this->architecture_->get_diameter(); k++) { + uid_1_exist = this->assign_at_distance(pair.second, root, k); + if (uid_1_exist) { + break; + } + } + if (!uid_1_exist) { + throw LexiRouteError( + "Unable to assign physical qubit - no free qubits remaining."); + } + } + } + return relabelled; +} + +/** + * LexiRoute::set_interacting_uids + * Updates this->interacting_uids_ with all "interacting" pairs + * of UnitID in this->mapping_frontier_ + */ +void LexiRoute::set_interacting_uids(bool assigned_only) { + // return types + this->interacting_uids_.clear(); + for (auto it = + this->mapping_frontier_->quantum_boundary->get().begin(); + it != this->mapping_frontier_->quantum_boundary->get().end(); + ++it) { + Edge e0 = this->mapping_frontier_->circuit_.get_nth_out_edge( + it->second.first, it->second.second); + Vertex v0 = this->mapping_frontier_->circuit_.target(e0); + // should never be input vertex, so can always use in_edges + int n_edges = this->mapping_frontier_->circuit_.n_in_edges_of_type( + v0, EdgeType::Quantum); + if (n_edges == 2) { + auto jt = it; + ++jt; + for (; + jt != this->mapping_frontier_->quantum_boundary->get().end(); + ++jt) { + // i.e. 
if vertices match + Edge e1 = this->mapping_frontier_->circuit_.get_nth_out_edge( + jt->second.first, jt->second.second); + Vertex v1 = this->mapping_frontier_->circuit_.target(e1); + if (v0 == v1) { + // we can assume a qubit will only be in one interaction + // we can assume from how we iterate through pairs that each qubit + // will only be found in one match + if (!assigned_only || + (this->architecture_->uid_exists(Node(it->first)) && + this->architecture_->uid_exists(Node(jt->first)))) { + interacting_uids_.insert({it->first, jt->first}); + interacting_uids_.insert({jt->first, it->first}); + } + } + } + } else if (n_edges != 1) { + TKET_ASSERT(!"Vertex should only have 1 or 2 edges."); + } + } +} + +swap_set_t LexiRoute::get_candidate_swaps() { + swap_set_t candidate_swaps; + for (const auto& interaction : this->interacting_uids_) { + Node assigned_first = Node(this->labelling_[interaction.first]); + std::vector adjacent_uids_0 = + this->architecture_->uids_at_distance(assigned_first, 1); + if (adjacent_uids_0.size() == 0) { + throw LexiRouteError( + assigned_first.repr() + " has no adjacent Node in Architecture."); + } + for (const Node& neighbour : adjacent_uids_0) { + if (candidate_swaps.find({neighbour, assigned_first}) == + candidate_swaps.end()) { + candidate_swaps.insert({assigned_first, neighbour}); + } + } + Node assigned_second = Node(this->labelling_[interaction.second]); + std::vector adjacent_uids_1 = + this->architecture_->uids_at_distance(assigned_second, 1); + if (adjacent_uids_1.size() == 0) { + throw LexiRouteError( + assigned_first.repr() + " has no adjacent Node in Architecture."); + } + for (const Node& neighbour : adjacent_uids_1) { + if (candidate_swaps.find({neighbour, assigned_second}) == + candidate_swaps.end()) { + candidate_swaps.insert({assigned_second, neighbour}); + } + } + } + return candidate_swaps; +} + +bool is_vertex_CX(const Circuit& circ_, const Vertex& v) { + OpType ot = circ_.get_OpType_from_Vertex(v); + if (ot != OpType::CX) { + if (ot == OpType::Conditional) { + const Conditional& b = + static_cast(*circ_.get_Op_ptr_from_Vertex(v)); + if (b.get_op()->get_type() != OpType::CX) { + return false; + } + } else { + return false; + } + } + return true; +} + +std::pair LexiRoute::check_bridge( + const std::pair& swap, unsigned lookahead) { + std::pair output = {false, false}; + // first confirm whether it even has an interaction + auto it = this->interacting_uids_.find(swap.first); + if (it != this->interacting_uids_.end()) { // => in interaction + if (this->architecture_->get_distance(swap.first, Node(it->second)) == + 2) { // => could be bridge + // below should always return correct object given prior checks + VertPort vp = + (*this->mapping_frontier_->quantum_boundary->find(swap.first)).second; + Edge out_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + vp.first, vp.second); + output.first = is_vertex_CX( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->circuit_.target(out_edge)); + } + } + // repeat for second swap + it = this->interacting_uids_.find(swap.second); + if (it != this->interacting_uids_.end()) { + if (this->architecture_->get_distance(swap.second, Node(it->second)) == 2) { + VertPort vp = + (*this->mapping_frontier_->quantum_boundary->find(swap.second)) + .second; + Edge out_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + vp.first, vp.second); + output.second = is_vertex_CX( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->circuit_.target(out_edge)); + } + } + if ((output.first 
&& output.second) || (!output.first && !output.second)) { + return {0, 0}; + } + // implies conditions are set to at least check if BRIDGE is better + swap_set_t candidate_swaps = { + swap, + {swap.first, + swap.first}}; // second swap here will just compare the base case + + // as with best swap finder, we create a set of candidate swap gates and + // then find best, except with only 2 swap (best swap and no swap) + while (candidate_swaps.size() > 1 /*some lookahead parameter*/) { + this->mapping_frontier_->advance_next_2qb_slice(lookahead); + // true bool means it only sets interacting uids if both uids are in + // architecture + this->set_interacting_uids(true); + // if 0, just take first swap rather than place + if (this->interacting_uids_.size() == 0) { + candidate_swaps = {*candidate_swaps.begin()}; + } else { + interacting_nodes_t convert_uids; + for (const auto& p : this->interacting_uids_) { + convert_uids.insert( + {Node(this->labelling_[p.first]), + Node(this->labelling_[p.second])}); + } + LexicographicalComparison lookahead_lc(this->architecture_, convert_uids); + lookahead_lc.remove_swaps_lexicographical(candidate_swaps); + } + } + // condition implies bridge is chosen + // if both remained then lexicographically equivalent under given conditions + // so either can be added with same consequences (for given hyper + // parameters) + if (*candidate_swaps.begin() == swap) { + output = {0, 0}; + } + return output; +} + +// Returns the distance between n1 and p1 and the distance between n2 and p2, +// distance ordered (greatest first) +const std::pair LexiRoute::pair_distances( + const Node& p0_first, const Node& p0_second, const Node& p1_first, + const Node& p1_second) const { + if (!this->architecture_->uid_exists(p0_first) || + !this->architecture_->uid_exists(p0_second) || + !this->architecture_->uid_exists(p1_first) || + !this->architecture_->uid_exists(p1_second)) { + throw LexiRouteError( + "Node passed to LexiRoute::pair_distances not in architecture."); + } + size_t curr_dist1 = this->architecture_->get_distance(p0_first, p0_second); + size_t curr_dist2 = this->architecture_->get_distance(p1_first, p1_second); + return (curr_dist1 > curr_dist2) ? 
std::make_pair(curr_dist1, curr_dist2) + : std::make_pair(curr_dist2, curr_dist1); +} + +void LexiRoute::remove_swaps_decreasing(swap_set_t& swaps) { + swap_set_t remaining_swaps; + Node pair_first, pair_second; + for (const auto& swap : swaps) { + auto it = this->interacting_uids_.find(swap.first); + if (it != this->interacting_uids_.end()) { + pair_first = Node(it->second); + } else { + pair_first = swap.first; + } + if (pair_first == swap.second) { + continue; + } + auto jt = this->interacting_uids_.find(swap.second); + if (jt != this->interacting_uids_.end()) { + pair_second = Node(jt->second); + } else { + pair_second = swap.second; + } + if (pair_second == swap.first) { + continue; + } + + const std::pair& curr_dists = + this->pair_distances(swap.first, pair_first, swap.second, pair_second); + const std::pair& news_dists = + this->pair_distances(swap.second, pair_first, swap.first, pair_second); + if (news_dists >= curr_dists) { + continue; + } + remaining_swaps.insert(swap); + } +} + +void LexiRoute::solve(unsigned lookahead) { + // store a copy of the original this->mapping_frontier_->quantum_boundray + // this object will be updated and reset throughout the swap picking procedure + // so need to return it to original setting at end + unit_vertport_frontier_t copy; + for (const std::pair& pair : + this->mapping_frontier_->quantum_boundary->get()) { + copy.insert({pair.first, pair.second}); + } + // some Qubits in boundary of this->mapping_frontier_->circuit_ may not be + // this->architecture_ Node If true, assign physical meaning by replacing with + // Node from this->architecture_ + // "candidate_swaps" are connected pairs of Node in this->architecture_ s.t. + // at least one is in an "interaction" and both are "assigned" i.e. present in + // this->mapping_frontier_->circuit + + bool updated = this->update_labelling(); + if (updated) { + // update unit id at boundary in case of relabelling + this->mapping_frontier_->update_quantum_boundary_uids(this->labelling_); + return; + } + swap_set_t candidate_swaps = this->get_candidate_swaps(); + this->remove_swaps_decreasing(candidate_swaps); + // Only want to substitute a single swap + // check next layer of interacting qubits and remove swaps until only one + // lexicographically superior swap is left + unsigned counter = 0; + while (candidate_swaps.size() > 1 && counter < lookahead) { + // if 0, just take first swap rather than place + if (this->interacting_uids_.size() == 0) { + break; + } else { + interacting_nodes_t convert_uids; + for (const auto& p : this->interacting_uids_) { + convert_uids.insert( + {Node(this->labelling_[p.first]), + Node(this->labelling_[p.second])}); + } + LexicographicalComparison lookahead_lc(this->architecture_, convert_uids); + lookahead_lc.remove_swaps_lexicographical(candidate_swaps); + } + counter++; + this->mapping_frontier_->advance_next_2qb_slice(lookahead); + // true bool means it only sets interacting uids if both uids are in + // architecture + this->set_interacting_uids(true); + } + + auto it = candidate_swaps.end(); + --it; + std::pair chosen_swap = *it; + this->mapping_frontier_->set_quantum_boundary(copy); + + this->set_interacting_uids(); + std::pair check = this->check_bridge(chosen_swap, lookahead); + + // set for final time, to allow gates to be correctly inserted, but then leave + // as is + this->mapping_frontier_->set_quantum_boundary(copy); + if (!check.first && !check.second) { + // update circuit with new swap + // final_labelling is initial labelling permuted by single swap + 
this->mapping_frontier_->add_swap(chosen_swap.first, chosen_swap.second); + } else { + // only need to reset in bridge case + this->set_interacting_uids(); + if (check.first) { + Node target = Node(this->interacting_uids_[chosen_swap.first]); + auto path = this->architecture_->get_path(chosen_swap.first, target); + // does path include root and target? + Node central = Node(path[1]); + this->mapping_frontier_->add_bridge(chosen_swap.first, central, target); + } + if (check.second) { + Node target = Node(this->interacting_uids_[chosen_swap.second]); + auto path = this->architecture_->get_path(chosen_swap.second, target); + // does path include root and target? + Node central = Node(path[1]); + this->mapping_frontier_->add_bridge(chosen_swap.second, central, target); + } + } + // TODO: Refactor the following to happen during add_swap and add_bridge + // methods + if (copy.size() < this->mapping_frontier_->quantum_boundary->size()) { + // implies ancilla qubit is added + // find ancilla qubit, find swap vertex and port by looking at boundary, + // store in ancillas type + for (auto it = + this->mapping_frontier_->quantum_boundary->get().begin(); + it != this->mapping_frontier_->quantum_boundary->get().end(); + ++it) { + bool match = false; + for (auto jt = copy.get().begin(); jt != copy.get().end(); + ++jt) { + if (it->first == jt->first) { + match = true; + break; + } + } + if (!match) { + // extra will be added in it + // This is same condition as SWAP case, which means "Ancilla" has + // already moved to a new physical node + if (!check.first && !check.second) { + if (Node(it->first) == chosen_swap.first) { + this->mapping_frontier_->ancilla_nodes_.insert(chosen_swap.second); + } else { + this->mapping_frontier_->ancilla_nodes_.insert(chosen_swap.first); + } + } else { + this->mapping_frontier_->ancilla_nodes_.insert(Node(it->first)); + } + break; + } + } + } + return; +} + +LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) + : max_depth_(_max_depth){}; + +bool LexiRouteRoutingMethod::check_method( + const std::shared_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const { + return true; +} + +unit_map_t LexiRouteRoutingMethod::routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + LexiRoute lr(architecture, mapping_frontier); + lr.solve(this->max_depth_); + return {}; +} + +} // namespace tket diff --git a/tket/src/Mapping/LexiRoute.hpp b/tket/src/Mapping/LexiRoute.hpp new file mode 100644 index 0000000000..ed0393f347 --- /dev/null +++ b/tket/src/Mapping/LexiRoute.hpp @@ -0,0 +1,192 @@ +#ifndef _TKET_LexiRoute_H_ +#define _TKET_LexiRoute_H_ + +#include "Mapping/LexicographicalComparison.hpp" +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class LexiRouteError : public std::logic_error { + public: + explicit LexiRouteError(const std::string& message) + : std::logic_error(message) {} +}; + +/** + * A class for modifiying a Circuit held in a MappingFrontier object + * with either an Architecture permitted single SWAP gate or BRIDGE gate. + * Used in the LexiRouteRoutingMethod class which provides a subcircuit + * modification method for MappingManager. 
Used in solution presented in "On the + * qubit routing problem" -> arXiv:1902.08091 + */ +class LexiRoute { + public: + /** + * Class Constructor + * @param _architecture Architecture object added operations must respect + * @param _mapping_frontier Contains Circuit object to be modified + */ + LexiRoute( + const ArchitecturePtr& _architecture, + std::shared_ptr& _mapping_frontier); + + /** + * When called, LexiRoute::solve will modify the Circuit held in + * MappingFrontier object passed at class construction. Either a SWAP gate + * will be inserted at the input boundary of the held Circuit or a CX gate + * will be transformed into a BRIDGE gate. The added SWAP or BRIDGE gate will + * be valid for the Architecture passed at class construction. Additionally, + * an "unlabelled" Qubit in the Circuit may be relabelled to a Node in the + * Architecture, or an "unlabelled" Qubit may have its path merged with an + * ancilla qubit. + * The decision making is based on the heuristic outlined in arXiv:1902.08091. + * + * @param lookahead Number of slices to lookahead at when determining best + * SWAP or BRIDGE + */ + void solve(unsigned lookahead); + + private: + /** + * this->interacting_uids_ attribute is a map where key is one UnitID + * and value is the UnitID it needs to be adjacent to. + * This map is implicitly updated whenever a logical SWAP is inserted. + * set_interacting_uids determines this map for the first parallel set of + * interacting UnitID in the Circuit held in this->mapping_frontier_ + * @param assigned_only If true, only include interactions where both UnitID + * are in this->architecture_. + */ + void set_interacting_uids(bool assigned_only = false); + + /** + * Merges the qubit paths of "merge" and "ancilla" in mapping frontier circuit + * such that the output of the final ancilla vertex leads into the input of + * the first merge vertex. + * + * @param merge UnitID to which ancilla path is prepended + * @param ancilla UnitID of ancilla opeartions + */ + void merge_with_ancilla(const UnitID& merge, const UnitID& ancilla); + + /** + * If there is some "free" Node in Architecture at distance "distances" on + * the connectivity graph, assign (relable) UnitID assignee to it. "free" + * => not in Circuit. If no unassigned node at distances from root, return + * false. + * @param assignee UnitID not in Architecture to relabel + * @param root Node in Architecture + * @param distances Distance at which to find free Node from root at + * @return True if assigned, else False + */ + bool assign_at_distance( + const UnitID& assignee, const Node& root, unsigned distances); + + /** + * If this->set_interacting_uids assigned_only bool is false then the + * this->interacting_uids attribute may have key and value UnitID not in + * this->architecture_. + * update_labelling assigns these non-architecture UnitID to some Architecture + * UnitID, updating the this->labelling_ attribute. + * @return True if anything relabelled, else false + */ + bool update_labelling(); + + /** + * Returns a set of pair of UnitID, each denoting a SWAP. + * Returned SWAP have at least one UnitID in interacting_uids_. + * This is such that enacting any of these SWAP will alter the distance + * between some interacting UnitID. + * @return std::pair suitable for addition to Circuit + */ + swap_set_t get_candidate_swaps(); + + /** + * Proposed swap will have two Node + * Each of these Node may be in some interaction in the first layer of circuit + * held in mapping_frontier. 
If either of these Node is in an interaction,
+   * check whether said interaction is a CX interaction, and if the pair of Node
+   * in the interaction are at distance 2. If true, compare lexicographical
+   * distances between no swap and the given swap, assuming distance 2
+   * interactions are complete. If not swapping is better, update the return
+   * object to reflect this.
+   * @param swap Pair of Node comprising SWAP for checking
+   * @param lookahead Number of steps of lookahead employed for comparison
+   * @return Pair of bool, where true implies BRIDGE to be added
+   */
+  std::pair check_bridge(
+      const std::pair& swap, unsigned lookahead);
+
+  /**
+   * Returns a pair of distances, being the distances between p0_first &
+   * p0_second, and p1_first & p1_second. The pair is ordered such that the
+   * greatest distance is first.
+   *
+   * @param p0_first First Node in first interaction to find distance between
+   * @param p0_second Second Node in first interaction to find distance between
+   * @param p1_first First Node in second interaction to find distance between
+   * @param p1_second Second Node in second interaction to find distance between
+   * @return Pair of size_t, being distances on architecture graph
+   */
+  const std::pair pair_distances(
+      const Node& p0_first, const Node& p0_second, const Node& p1_first,
+      const Node& p1_second) const;
+
+  /**
+   * It is always expected that at least one Node in a SWAP will be in some
+   * interaction. This method checks that the given swap will strictly decrease
+   * the distance for this interaction, and removes it from the swaps set if
+   * not.
+   *
+   * @param swaps Potential swaps to remove from
+   */
+  void remove_swaps_decreasing(swap_set_t& swaps);
+
+  // Architecture all new physical operations must respect
+  ArchitecturePtr architecture_;
+  // Contains circuit for finding SWAP from and non-routed/routed boundary
+  std::shared_ptr& mapping_frontier_;
+  // Map between UnitID and UnitID they interact with at boundary
+  unit_map_t interacting_uids_;
+  // Map between original circuit UnitID and new UnitID due to dynamic
+  // placement
+  unit_map_t labelling_;
+  // Set tracking which Architecture Node are present in Circuit
+  std::set assigned_nodes_;
+};
+
+// Child class of RoutingMethod, with overridden methods for routing
+// MappingFrontier objects
+class LexiRouteRoutingMethod : public RoutingMethod {
+ public:
+  /**
+   * Checking and Routing methods redefined using LexiRoute. Only circuit depth,
+   * corresponding to lookahead, is a required parameter.
+   *
+   * @param _max_depth Number of layers of gates checked in routed subcircuit.
+   */
+  LexiRouteRoutingMethod(unsigned _max_depth);
+
+  /**
+   * @return true if method can route subcircuit, false if not
+   */
+  bool check_method(
+      const std::shared_ptr& /*mapping_frontier*/,
+      const ArchitecturePtr& /*architecture*/) const;
+
+  /**
+   * @param mapping_frontier Contains boundary of routed/unrouted circuit for
+   * modifying
+   * @param architecture Architecture providing physical constraints
+   * @return Logical to Physical mapping at boundary due to modification.
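+   *
+   * A minimal driving sketch, assuming a Circuit whose Qubits have already
+   * been renamed to Architecture Nodes (as in the tests further down this
+   * patch); variable names are illustrative only:
+   *
+   *   ArchitecturePtr arc = std::make_shared<Architecture>(architecture);
+   *   std::shared_ptr<MappingFrontier> mf =
+   *       std::make_shared<MappingFrontier>(circ);
+   *   LexiRouteRoutingMethod lrrm(100);
+   *   if (lrrm.check_method(mf, arc)) {
+   *     lrrm.routing_method(mf, arc);  // inserts SWAP/BRIDGE gates into circ
+   *   }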
+ * + */ + unit_map_t routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const; + + private: + unsigned max_depth_; +}; + +} // namespace tket + +#endif \ No newline at end of file diff --git a/tket/src/Mapping/LexicographicalComparison.cpp b/tket/src/Mapping/LexicographicalComparison.cpp new file mode 100644 index 0000000000..3d51a0fad8 --- /dev/null +++ b/tket/src/Mapping/LexicographicalComparison.cpp @@ -0,0 +1,131 @@ +#include "Mapping/LexicographicalComparison.hpp" + +namespace tket { + +/** + * Assumes all node in interacting_nodes in architecture, and ignores if they + * aren't maybe throw error instead? + */ +LexicographicalComparison::LexicographicalComparison( + const ArchitecturePtr& _architecture, + const interacting_nodes_t& _interacting_nodes) + : architecture_(_architecture), interacting_nodes_(_interacting_nodes) { + unsigned diameter = this->architecture_->get_diameter(); + + lexicographical_distances_t distance_vector(diameter, 0); + for (const auto& interaction : this->interacting_nodes_) { + // If Node not in architecture, don't add + if (!this->architecture_->uid_exists(interaction.first) || + !this->architecture_->uid_exists(interaction.second)) { + throw LexicographicalComparisonError( + "Constructor passed some interacting node not in architecture."); + } + // key->value already copied, assign reverse to map for later ease + this->interacting_nodes_[interaction.second] = interaction.first; + unsigned distance = this->architecture_->get_distance( + interaction.first, interaction.second); + if (distance > 0) { + ++distance_vector[diameter - distance]; + } + } + this->lexicographical_distances = distance_vector; +} + +void LexicographicalComparison::increment_distances( + lexicographical_distances_t& distances, + const std::pair& interaction, int increment) const { + const unsigned distances_index = + this->architecture_->get_diameter() - + this->architecture_->get_distance(interaction.first, interaction.second); + if (distances[distances_index] == 0 && increment < 0) { + throw LexicographicalComparisonError( + "Negative increment value is larger than value held at index, " + "modification not " + "allowed."); + } + distances[distances_index] += increment; +} + +/** + * getter + */ +lexicographical_distances_t +LexicographicalComparison::get_lexicographical_distances() const { + return this->lexicographical_distances; +} + +/** + * get_updated_distances + * updates the "distance vector" (this->lexicographical_distances) to reflect + * the distance between interacting logical qubits given that the logical qubits + * present in "swap" have swapped physical qubits (Node) + */ +lexicographical_distances_t LexicographicalComparison::get_updated_distances( + const swap_t& swap) const { + // make a copy of base lexicographical distances + lexicographical_distances_t copy = this->lexicographical_distances; + if (swap.first == swap.second) { + return copy; + } + auto iq_it = this->interacting_nodes_.find(swap.first); + // first condition => first node not interacting with self, so update + // distances + if (iq_it != this->interacting_nodes_.end()) { + // update distances due to first swap node and qubit its interating with + // (assuming swap) + Node interacting = iq_it->second; + if (interacting != swap.second) { + increment_distances(copy, {swap.first, interacting}, -2); + // updates distances due to second swap node and qubit first is + // interacting with + increment_distances(copy, {swap.second, interacting}, 2); + } + } + iq_it = 
this->interacting_nodes_.find(swap.second);
+  // => second node not interacting with self
+  if (iq_it != this->interacting_nodes_.end()) {
+    Node interacting = iq_it->second;
+    if (interacting != swap.first) {
+      // update distances due to second node and qubit it's interacting with
+      increment_distances(copy, {swap.second, interacting}, -2);
+      // update distances due to first node and qubit second node is
+      // interacting with
+      increment_distances(copy, {swap.first, interacting}, 2);
+    }
+  }
+  return copy;
+}
+
+/**
+ * remove_swaps_lexicographical
+ * A value x at index i of this->lexicographical_distances => x logical qubits
+ * are distance (diameter - i) away from the logical qubit they should be
+ * interacting with. For each swap (swap_t) in "candidate_swaps" a
+ * new distances object is created given interacting_qubits. Each distance for
+ * each swap is lexicographically compared. If a distance is lexicographically
+ * larger than any other, its corresponding swap is removed from candidate_swaps.
+ * Therefore swaps remaining in candidate_swaps after this process are
+ * lexicographically identical for the implied logical->physical qubit mapping
+ * and interacting logical qubits.
+ */
+void LexicographicalComparison::remove_swaps_lexicographical(
+    swap_set_t& candidate_swaps) const {
+  auto it = candidate_swaps.begin();
+  lexicographical_distances_t winning_distances =
+      this->get_updated_distances(*it);
+  swap_set_t preserved_swaps = {*it};
+  ++it;
+  for (; it != candidate_swaps.end(); ++it) {
+    lexicographical_distances_t comparison_distances =
+        this->get_updated_distances(*it);
+
+    if (comparison_distances < winning_distances) {
+      preserved_swaps = {*it};
+      winning_distances = comparison_distances;
+    } else if (comparison_distances == winning_distances) {
+      preserved_swaps.insert(*it);
+    }
+  }
+  candidate_swaps = preserved_swaps;
+}
+}  // namespace tket
\ No newline at end of file
diff --git a/tket/src/Mapping/LexicographicalComparison.hpp b/tket/src/Mapping/LexicographicalComparison.hpp
new file mode 100644
index 0000000000..c6bef60bc3
--- /dev/null
+++ b/tket/src/Mapping/LexicographicalComparison.hpp
@@ -0,0 +1,87 @@
+#ifndef _TKET_LexicographicalComparison_H_
+#define _TKET_LexicographicalComparison_H_
+
+#include "Architecture/Architectures.hpp"
+#include "Utils/BiMapHeaders.hpp"
+#include "Utils/UnitID.hpp"
+
+namespace tket {
+
+typedef std::map interacting_nodes_t;
+typedef std::pair swap_t;
+typedef std::set swap_set_t;
+typedef std::vector lexicographical_distances_t;
+
+class LexicographicalComparisonError : public std::logic_error {
+ public:
+  explicit LexicographicalComparisonError(const std::string& message)
+      : std::logic_error(message) {}
+};
+
+/**
+ * A class for running lexicographical comparisons of SWAP gates for some
+ * architecture and set of interacting qubits.
+ * Used in the 'LexiRoute' method for routing subcircuits as part of the
+ * MappingManager framework.
+ * Used in solution presented in "On the qubit routing problem" ->
+ * arXiv:1902.08091
+ */
+class LexicographicalComparison {
+ public:
+  /**
+   * Class constructor
+   * @param _architecture Architecture object for calculating distances from
+   * @param _interacting_nodes Pairs of physical Node with interacting logical
+   * Qubit
+   */
+  LexicographicalComparison(
+      const ArchitecturePtr& _architecture,
+      const interacting_nodes_t& _interacting_nodes);
+
+  /**
+   * Modifies some distances object by reference.
+   * Updates the distance between the pair of Node in interaction by increment.
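+   *
+   * As a sketch of the indexing convention (the entry at index i counts
+   * interacting nodes whose partner is at distance diameter - i), distance
+   * vectors are plain std::vector objects compared with the standard
+   * lexicographical operator<, so a smaller vector is a better configuration,
+   * e.g.
+   *
+   *   lexicographical_distances_t a{1, 2, 0};
+   *   lexicographical_distances_t b{0, 3, 1};
+   *   bool b_preferred = b < a;  // true: b has no pairs at the full diameter
+   *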
+ * Increment and Interaction determined by some SWAP. + * + * @param distances Distances object updated. + * @param interaction Node pair increment distance indexing found from + * @param increment Amount to modify distance index by + */ + void increment_distances( + lexicographical_distances_t& distances, + const std::pair& interaction, int increment) const; + + /** + * Getter for lexicographical_distances_ attribute + */ + lexicographical_distances_t get_lexicographical_distances() const; + /** + * Takes a copy of Distance vector held in object and modifies it to reflect + * how distance between pairs of interacting nodes in attribute would change + * given the logical qubits asisgned to the physical node in "swap" swapped. + * + * @param swap Physical Node Logical Qubit swapped between to derive copy + * distance + */ + lexicographical_distances_t get_updated_distances(const swap_t& swap) const; + + /** + * For each swap in candidate_swaps, removes swap from set if the distance + * vector produced by modifying this->lexicographical_distances by said swap + * is lexicographically smaller to that produced for any other swap. In this + * way, only swaps with lexicographically identical swap for the given + * interacting nodes remain after the method is called. + * + * @param candidate_swaps Potential pairs of nodes for comparing and removing + */ + void remove_swaps_lexicographical(swap_set_t& candidate_swaps) const; + + private: + ArchitecturePtr architecture_; + lexicographical_distances_t lexicographical_distances; + interacting_nodes_t interacting_nodes_; +}; + +} // namespace tket + +#endif \ No newline at end of file diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp new file mode 100644 index 0000000000..03fd4cf3df --- /dev/null +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -0,0 +1,472 @@ +#include "Mapping/MappingFrontier.hpp" + +#include "Circuit/Circuit.hpp" +namespace tket { + +/** + * unit_vertport_frontier_t is , helper function returned + * UnitID given Edge + */ +UnitID get_unitid_from_unit_frontier( + const std::shared_ptr& u_frontier, + const VertPort& vp) { + for (auto it = u_frontier->get().begin(); + it != u_frontier->get().end(); ++it) { + if (it->second == vp) { + return it->first; + } + } + throw MappingFrontierError( + std::string("Edge provided not in unit_frontier_t object.")); +} + +/** + * quantum_boundary stored as vertport so that correct edge can be recovered + * after subcircuit substitution method uses Vertex and port_t and + * Circuit::get_nth_out_edge to generate unit_frontier_t object + */ +std::shared_ptr frontier_convert_vertport_to_edge( + const Circuit& circuit, + const std::shared_ptr& u_frontier) { + // make empty unit_frontier_t object + std::shared_ptr output_frontier = + std::make_shared(); + // iterate through u_frontier, convert VertPort to Edge and insert + for (const std::pair& pair : u_frontier->get()) { + output_frontier->insert( + {pair.first, + circuit.get_nth_out_edge(pair.second.first, pair.second.second)}); + } + return output_frontier; +} + +/** + * Initialise quantum_boundary and classical_boundary from + * out edges of Input vertices + */ +MappingFrontier::MappingFrontier(Circuit& _circuit) : circuit_(_circuit) { + this->quantum_boundary = std::make_shared(); + this->classical_boundary = std::make_shared(); + // Set up {UnitID, VertPort} objects for quantum and classical boundaries + for (const Qubit& qb : this->circuit_.all_qubits()) { + this->quantum_boundary->insert({qb, 
{this->circuit_.get_in(qb), 0}}); + } + for (const Bit& bit : this->circuit_.all_bits()) { + this->classical_boundary->insert( + {bit, + this->circuit_.get_nth_b_out_bundle(this->circuit_.get_in(bit), 0)}); + } +} + +void MappingFrontier::advance_next_2qb_slice(unsigned max_advance) { + bool boundary_updated = false; + unsigned loop = 0; + std::shared_ptr current_frontier = + frontier_convert_vertport_to_edge(this->circuit_, this->quantum_boundary); + // Get all vertices in first cut + VertexVec immediate_cut_vertices_v = + *(this->circuit_ + .next_cut(current_frontier, std::make_shared()) + .slice); + do { + // each do section first finds the next set of edges after the held set + // for edges with target vertices with all their edges presented in the + // first set + loop++; + boundary_updated = false; + // produce next frontier object + std::shared_ptr next_frontier = + std::make_shared(); + + for (const std::pair& pair : + current_frontier->get()) { + // if target_v not in immediate_cut_vertices, then do not pass it + Vertex target_v = this->circuit_.target(pair.second); + + EdgeVec in_edges = + this->circuit_.get_in_edges_of_type(target_v, EdgeType::Quantum); + + bool in_slice = + std::find( + immediate_cut_vertices_v.begin(), immediate_cut_vertices_v.end(), + target_v) != immediate_cut_vertices_v.end(); + + if ((!in_slice && in_edges.size() > 1) || + this->circuit_.get_OpType_from_Vertex(target_v) == OpType::Output) { + // Vertex either not allowed to pass, or is output vertex => update + // nothing + next_frontier->insert({pair.first, pair.second}); + } else { + // vertex can be surpassed, so update quantum_boundary and next_frontier + // with next edge + Edge next_edge = this->circuit_.get_next_edge(target_v, pair.second); + this->quantum_boundary->replace( + this->quantum_boundary->get().find(pair.first), + {pair.first, + {target_v, this->circuit_.get_source_port(next_edge)}}); + next_frontier->insert({pair.first, next_edge}); + } + } + // Given new frontier, find the actual next cut + CutFrontier next_cut = this->circuit_.next_cut( + next_frontier, std::make_shared()); + // For each vertex in a slice, if its physically permitted, update + // quantum_boundary with quantum out edges from vertex (i.e. 
+ // next_cut.u_frontier) + for (const Vertex& vert : *next_cut.slice) { + // Output means we don't want to pass, so just leave + if (this->circuit_.get_OpType_from_Vertex(vert) == OpType::Output) { + continue; + } + EdgeVec in_edges = + this->circuit_.get_in_edges_of_type(vert, EdgeType::Quantum); + // More than 1 edge means we want to keep edges, so continue + if (in_edges.size() > 1) { + continue; + } + // can guarantee that we update now as non-updating cases have been + // continued + boundary_updated = true; + // push edge past single qubit vertex, repeat + UnitID uid = get_unitid_from_unit_frontier( + this->quantum_boundary, + {this->circuit_.source(in_edges[0]), + this->circuit_.get_source_port(in_edges[0])}); + + Edge replacement_edge = + next_cut.u_frontier->get().find(uid)->second; + + Vertex source_vertex = this->circuit_.source(replacement_edge); + port_t source_port = this->circuit_.get_source_port(replacement_edge); + + this->quantum_boundary->replace( + this->quantum_boundary->get().find(uid), + {uid, {source_vertex, source_port}}); + } + current_frontier = next_frontier; + } while (boundary_updated && loop <= max_advance); + return; +} + +/** + * advance_frontier_boundary + * terminates when next_cut returns a "slice" where + * no vertices are physically permitted by the architecture + * quantum_boundary and classical_boundary updated to reflect this + */ +void MappingFrontier::advance_frontier_boundary( + const ArchitecturePtr& architecture) { + bool boundary_updated = false; + do { + // next_cut.slice vertices in_edges from this->quantum_boundary + // TODO: add optional skip function later to skip vertices that don't have + // physical requirements + boundary_updated = false; + CutFrontier next_cut = this->circuit_.next_cut( + frontier_convert_vertport_to_edge( + this->circuit_, this->quantum_boundary), + std::make_shared()); + + // For each vertex in a slice, if its physically permitted, update + // quantum_boundary with quantum out edges from vertex (i.e. + // next_cut.u_frontier) + for (const Vertex& vert : *next_cut.slice) { + std::vector uids; + for (const Edge& e : + this->circuit_.get_in_edges_of_type(vert, EdgeType::Quantum)) { + // TODO: look at key_extractor in boost instead of this helper + // method... + uids.push_back(get_unitid_from_unit_frontier( + this->quantum_boundary, + {this->circuit_.source(e), this->circuit_.get_source_port(e)})); + } + + // TODO: update architecture valid operation to reflect devices supporting + // different multi qubit operations also, like, think about how best that + // should actually be done? + std::vector nodes; + for (const UnitID& uid : uids) { + nodes.push_back(Node(uid)); + } + if (architecture->valid_operation( + /* this->circuit_.get_OpType_from_Vertex(vert), */ + nodes)) { + // if no valid operation, boundary not updated and while loop terminates + boundary_updated = true; + for (const UnitID& uid : uids) { + Edge replacement_edge = + next_cut.u_frontier->get().find(uid)->second; + Vertex source_vertex = this->circuit_.source(replacement_edge); + port_t source_port = this->circuit_.get_source_port(replacement_edge); + this->quantum_boundary->replace( + this->quantum_boundary->get().find(uid), + {uid, {source_vertex, source_port}}); + } + } + } + } while (boundary_updated); + return; +} + +/** + * convert_u_frontier_to_edges + * Subcircuit requires EdgeVec, not unit_frontier_t as boundary information + * Helper Functions to convert types + * TODO: also probably another way of doing this? 
EdgeVec required for + * subcircuit. Double check with someone who knows better than I... + */ +EdgeVec convert_u_frontier_to_edges(const unit_frontier_t& u_frontier) { + EdgeVec edges; + for (const std::pair& pair : u_frontier.get()) { + edges.push_back(pair.second); + } + return edges; +} + +Subcircuit MappingFrontier::get_frontier_subcircuit( + unsigned _max_subcircuit_depth, unsigned _max_subcircuit_size) const { + CutFrontier current_cut = this->circuit_.next_cut( + frontier_convert_vertport_to_edge(this->circuit_, this->quantum_boundary), + this->classical_boundary); + + unsigned subcircuit_depth = 1; + VertexSet subcircuit_vertices( + current_cut.slice->begin(), current_cut.slice->end()); + // add cuts of vertices to subcircuit_vertices until constraints met, or end + // of circuit reached + while (subcircuit_depth < _max_subcircuit_depth && + unsigned(subcircuit_vertices.size()) < _max_subcircuit_size && + current_cut.slice->size() > 0) { + current_cut = + this->circuit_.next_cut(current_cut.u_frontier, current_cut.b_frontier); + subcircuit_depth++; + subcircuit_vertices.insert( + current_cut.slice->begin(), current_cut.slice->end()); + } + if (subcircuit_vertices.size() == 0) { + throw MappingFrontierError("Subcircuit being produced with no gates."); + } + return Subcircuit( + convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( + this->circuit_, this->quantum_boundary)), + convert_u_frontier_to_edges(*current_cut.u_frontier), + subcircuit_vertices); +} + +// TODO: Update to support ancillas +void MappingFrontier::update_quantum_boundary_uids( + const unit_map_t& relabelled_uids) { + for (const std::pair& label : relabelled_uids) { + // implies new labelling + if (label.first != label.second) { + // by type, label.first already assumed in circuit + // this condition means label.second also in circuit + // implies that a merging is done -> remove first qubit + if (this->quantum_boundary->get().find(label.second) != + this->quantum_boundary->get().end()) { + // erase, assume updated already + this->quantum_boundary->erase(label.first); + } else { + auto current_label_it = + this->quantum_boundary->get().find(label.first); + // relabel "label.first" with "label.second" + this->quantum_boundary->replace( + current_label_it, {label.second, current_label_it->second}); + unit_map_t relabel = {label}; + this->circuit_.rename_units(relabel); + } + } + } +} + +// TODO: expects every qubit is present in permutation, even if unmoved +void MappingFrontier::permute_subcircuit_q_out_hole( + const unit_map_t& final_permutation, Subcircuit& subcircuit) { + EdgeVec new_q_out_hole; + int i = 0; + // Change to iterate through final permutation first? + if (this->quantum_boundary->size() != final_permutation.size()) { + throw MappingFrontierError( + "Number of Qubits in mapping permutation does not match number of " + "Qubits in MappingFrontier boundary, for permuting Qubits as with " + "routed Subcircuit."); + } + for (const std::pair& pair : + this->quantum_boundary->get()) { + // other iteration avoids this... 
+ // TODO: change this when making route different subcircuits + auto it = final_permutation.find(pair.first); + if (it == final_permutation.end()) { + throw MappingFrontierError("Qubit in boundary not in permutation."); + } + std::pair uid_pair = *it; + if (uid_pair.first == uid_pair.second) { + new_q_out_hole.push_back(subcircuit.q_out_hole[i]); + } else { + int j = 0; + for (auto it = this->quantum_boundary->get().begin(); + it != this->quantum_boundary->get().end(); ++it) { + if (it->first == uid_pair.second) { + new_q_out_hole.push_back(subcircuit.q_out_hole[j]); + break; + } + j++; + } + } + i++; + } + subcircuit.q_out_hole = new_q_out_hole; +} + +/** + * MappingFrontier::get_u_frontier_default_unit_map + * Map from default qubit register qubits to UnitIDs in quantum_boundary + */ +unit_map_t MappingFrontier::get_default_to_quantum_boundary_unit_map() const { + unsigned i = 0; + unit_map_t default_to_u_frontier_map; + for (const std::pair& pair : + this->quantum_boundary->get()) { + default_to_u_frontier_map.insert({Qubit(i), pair.first}); + i++; + } + return default_to_u_frontier_map; +} + +void MappingFrontier::set_quantum_boundary( + const unit_vertport_frontier_t& new_boundary) { + this->quantum_boundary = std::make_shared(); + for (const std::pair& pair : new_boundary.get()) { + this->quantum_boundary->insert(pair); + } +} + +/** + * add_qubit + * Adds given UnitID as a qubit to held circuit. + * Updates boundary. + */ +void MappingFrontier::add_qubit(const UnitID& uid) { + Qubit qb(uid); + this->circuit_.add_qubit(qb); + this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); +} + +/** + * add_swap + * Inserts an OpType::SWAP gate into the uid_0 and uid_1 edges held in + * quantum_boundary This directly modifies circuit_ Updates quantum_boundary to + * reflect new edges + */ +void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { + // get iterators to quantum_boundary uids + auto uid0_in_it = this->quantum_boundary->find(uid_0); + auto uid1_in_it = this->quantum_boundary->find(uid_1); + + // Add Qubit if not in MappingFrontier boundary (i.e. 
not in circuit) + if (uid0_in_it == this->quantum_boundary->end()) { + this->add_qubit(uid_0); + uid0_in_it = this->quantum_boundary->find(uid_0); + } + if (uid1_in_it == this->quantum_boundary->end()) { + this->add_qubit(uid_1); + uid1_in_it = this->quantum_boundary->find(uid_1); + } + + // update held ancillas + Node n0 = Node(uid_0); + Node n1 = Node(uid_1); + + bool uid0_ancilla = + this->ancilla_nodes_.find(n0) != this->ancilla_nodes_.end(); + bool uid1_ancilla = + this->ancilla_nodes_.find(n1) != this->ancilla_nodes_.end(); + + if (uid0_ancilla && !uid1_ancilla) { + this->ancilla_nodes_.erase(n0); + this->ancilla_nodes_.insert(n1); + } + if (!uid0_ancilla && uid1_ancilla) { + this->ancilla_nodes_.erase(n1); + this->ancilla_nodes_.insert(n0); + } + + // Get predecessor edges to SWAP insert location + VertPort vp0 = uid0_in_it->second; + VertPort vp1 = uid1_in_it->second; + EdgeVec predecessors = { + this->circuit_.get_nth_out_edge(vp0.first, vp0.second), + this->circuit_.get_nth_out_edge(vp1.first, vp1.second)}; + + // add SWAP vertex to circuit_ and rewire into predecessor + Vertex swap_v = this->circuit_.add_vertex(OpType::SWAP); + this->circuit_.rewire( + swap_v, predecessors, {EdgeType::Quantum, EdgeType::Quantum}); + + // Update boundary to reflect new edges + EdgeVec successors = this->circuit_.get_all_out_edges(swap_v); + this->circuit_.dag[successors[0]].ports.first = 1; + this->circuit_.dag[successors[1]].ports.first = 0; + + this->quantum_boundary->replace( + uid0_in_it, {uid_0, {this->circuit_.source(successors[1]), 0}}); + this->quantum_boundary->replace( + uid1_in_it, {uid_1, {this->circuit_.source(successors[0]), 1}}); + + // update output vertices of quantum boundary of circuit to reflect changing + // qubit paths + auto uid0_circuit_boundary_it = + this->circuit_.boundary.get().find(uid_0); + auto uid1_circuit_boundary_it = + this->circuit_.boundary.get().find(uid_1); + + Vertex uid0_out = uid0_circuit_boundary_it->out_; + Vertex uid1_out = uid1_circuit_boundary_it->out_; + Vertex uid0_in = uid0_circuit_boundary_it->in_; + Vertex uid1_in = uid1_circuit_boundary_it->in_; + + this->circuit_.boundary.get().erase(uid_0); + this->circuit_.boundary.get().erase(uid_1); + + this->circuit_.boundary.get().insert({uid_0, uid0_in, uid1_out}); + this->circuit_.boundary.get().insert({uid_1, uid1_in, uid0_out}); +} + +void MappingFrontier::add_bridge( + const UnitID& control, const UnitID& central, const UnitID& target) { + // get predecessors + auto control_in_it = this->quantum_boundary->find(control); + auto central_in_it = this->quantum_boundary->find(central); + auto target_in_it = this->quantum_boundary->find(target); + + // by virtue of method, control and target qubit will always be in BRIDGE. + // However, distances used to check BRIDGE and find PATH may use + // central qubit that is unallocated, in which add it. 
+ if (central_in_it == this->quantum_boundary->end()) { + this->add_qubit(central); + central_in_it = this->quantum_boundary->find(central); + } + + VertPort vp_control = control_in_it->second; + VertPort vp_central = central_in_it->second; + VertPort vp_target = target_in_it->second; + + EdgeVec predecessors = { + this->circuit_.get_nth_out_edge(vp_control.first, vp_control.second), + this->circuit_.get_nth_out_edge(vp_central.first, vp_central.second), + this->circuit_.get_nth_out_edge(vp_target.first, vp_target.second), + }; // get cx vertex + // this should be guaranteeds by pre-checks + Vertex cx_v = this->circuit_.target(predecessors[0]); + // add bridge + Vertex bridge_v = this->circuit_.add_vertex(OpType::BRIDGE); + // add bridge vertex to circuit + this->circuit_.rewire( + bridge_v, predecessors, + {EdgeType::Quantum, EdgeType::Quantum, EdgeType::Quantum}); + // remove old cx vertex + this->circuit_.remove_vertex( + cx_v, Circuit::GraphRewiring::Yes, Circuit::VertexDeletion::Yes); +} + +} // namespace tket diff --git a/tket/src/Mapping/MappingFrontier.hpp b/tket/src/Mapping/MappingFrontier.hpp new file mode 100644 index 0000000000..d17eb9988b --- /dev/null +++ b/tket/src/Mapping/MappingFrontier.hpp @@ -0,0 +1,142 @@ +#ifndef _TKET_MappingFrontier_H_ +#define _TKET_MappingFrontier_H_ + +#include "Architecture/Architectures.hpp" +#include "Circuit/Circuit.hpp" +#include "Utils/BiMapHeaders.hpp" +#include "Utils/UnitID.hpp" + +namespace tket { + +typedef sequenced_map_t unit_vertport_frontier_t; + +// list of error types to throw out +class MappingFrontierError : public std::logic_error { + public: + explicit MappingFrontierError(const std::string& message) + : std::logic_error(message) {} +}; + +struct MappingFrontier { + /** + * VertPort instead of Edge as Edge changes in substitution, but Vertex and + * Port key information + */ + std::shared_ptr quantum_boundary; + + std::shared_ptr classical_boundary; + + /** + * Circuit held by reference and directly modified with SWAP (or other + * relevant) gates. + */ + Circuit& circuit_; + + std::set ancilla_nodes_; + + MappingFrontier(Circuit& _circuit); + + /** + * Given some Circuit Cut (or routed/unrouted boundary), advances the cut to + * the next cut of just two-qubit vertices, not including the current + * boundary. + * @param max_advance maximum number of cuts checked before terminating + */ + void advance_next_2qb_slice(unsigned max_advance); + + /** + * mapping_frontier data members updated to reflect + * the routed/non-routed boundary of mapping_frontier->circ + * architecture.valid_gate confirms whether circuit vertices are physically + * valid + * + * @param architecture Architecture governing physically allowed operations + */ + void advance_frontier_boundary(const ArchitecturePtr& architecture); + + /** + * Subcircuit produced from gates after held boundary. 
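+   *
+   * Typical use, mirroring RoutingMethodCircuit::check_method later in this
+   * patch (variable names illustrative):
+   *
+   *   Subcircuit sub = frontier->get_frontier_subcircuit(depth, size);
+   *   Circuit piece = frontier->circuit_.subcircuit(sub);
+   *   piece.rename_units(frontier->get_default_to_quantum_boundary_unit_map());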
+ * @param _max_subcircuit_depth + * @param _max_subcircuit_size + * + */ + Subcircuit get_frontier_subcircuit( + unsigned _max_subcircuit_depth, unsigned _max_subcircuit_size) const; + + /** + * update_quantum_boundary_uids + * route_circuit has no constraint that passed circuits must have qubits + * relabelled to architecture nodes route_subcircuit is allowed to either + * permute labelled physical qubits, or label logical qubits if logical qubits + * are labelled physical, update_quantum_boundary updates UnitID in + * this->quantum_boundary to reflect this change Also updates this->circuit_ + * to reflect this relabelling + * + * @param relabel_map map between current UnitID's in quantum_boundary and new + * UnitID's. + */ + void update_quantum_boundary_uids(const unit_map_t& relabel_map); + + /** + * permute_subcircuit_q_out_hole + * + * Given initial permutation of UnitIDs, finds final permutation via SWAPs in + * circuit and updates mapping_frontier subcircuit q_out_hole to reflect this + * + * @param final_permutation map between initial and final physical qubits for + * each logical qubit, used to permute subcircuit.q_out_hole + * @param subcircuit Subcircuit for rearranging boundary + */ + void permute_subcircuit_q_out_hole( + const unit_map_t& final_permutation, Subcircuit& subcircuit); + + /** + * get_default_to_quantum_boundary_unit_map + * subcircuit circuits created with default q register + * method returns map between default q register and physical qubit + * permutation at frontier used for circuit.rename_units + */ + unit_map_t get_default_to_quantum_boundary_unit_map() const; + + /** + * add_qubit + * Adds given UnitID as Qubit to this->circuit_. + * Updates this->quantum_boundary with new Qubit. + * + * @param uid UnitID to add. + */ + void add_qubit(const UnitID& uid); + + /** + * add_swap + * Inserts an OpType::SWAP gate into the uid_0 and uid_1 edges held in + * quantum_boundary. This directly modifies circuit_. + * Updates quantum_boundary to reflect new edges. + * + * @param uid_0 First Node in SWAP + * @param uid_1 Second Node in SWAP + */ + void add_swap(const UnitID& uid_0, const UnitID& uid_1); + + /** + * add_bridge + * Inserts an OpType::BRIDGE gate into edges relevant to passed UnitID. + * + * @param control First Node in BRIDGE + * @param central Second Node in BRIDGE + * @param target Third Node in BRIDGE + */ + void add_bridge( + const UnitID& control, const UnitID& central, const UnitID& target); + + /** + * Assigns the quantum_boundary_ attribute to that passed to method. + * + * @param new_boundary Object to reassign with. 
+   */
+  void set_quantum_boundary(const unit_vertport_frontier_t& new_boundary);
+};
+
+}  // namespace tket
+
+#endif
\ No newline at end of file
diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp
new file mode 100644
index 0000000000..9cf826ce61
--- /dev/null
+++ b/tket/src/Mapping/MappingManager.cpp
@@ -0,0 +1,75 @@
+#include "Mapping/MappingManager.hpp"
+
+#include "OpType/OpTypeFunctions.hpp"
+
+namespace tket {
+
+MappingManager::MappingManager(const ArchitecturePtr& _architecture)
+    : architecture_(_architecture) {}
+
+bool MappingManager::route_circuit(
+    Circuit& circuit,
+    const std::vector>& routing_methods)
+    const {
+  // Assumption: routing cannot route a circuit with more logical qubits than
+  // the Architecture has physical qubits
+
+  if (circuit.n_qubits() > this->architecture_->n_uids()) {
+    std::string error_string =
+        "Circuit has " + std::to_string(circuit.n_qubits()) +
+        " logical qubits. Architecture has " +
+        std::to_string(this->architecture_->n_uids()) +
+        " physical qubits. Circuit to be routed cannot have more "
+        "qubits than the Architecture.";
+    throw MappingManagerError(error_string);
+  }
+
+  // mapping_frontier tracks the boundary between routed & un-routed in circuit
+  // when initialised, boundary is over output edges of input vertices
+  std::shared_ptr mapping_frontier =
+      std::make_shared(circuit);
+  // updates routed/un-routed boundary
+
+  mapping_frontier->advance_frontier_boundary(this->architecture_);
+
+  auto check_finish = [&mapping_frontier]() {
+    for (const std::pair& pair :
+         mapping_frontier->quantum_boundary->get()) {
+      Edge e = mapping_frontier->circuit_.get_nth_out_edge(
+          pair.second.first, pair.second.second);
+      Vertex v = mapping_frontier->circuit_.target(e);
+
+      if (!is_final_q_type(
+              mapping_frontier->circuit_.get_OpType_from_Vertex(v))) {
+        return false;
+      }
+    }
+    return true;
+  };
+
+  bool circuit_modified = !check_finish();
+  while (!check_finish()) {
+    // The order methods are passed in the std::vector is
+    // the order they are iterated through to call "check_method".
+    // If a method performs better but only on specific subcircuits,
+    // rank it earlier in the passed vector.
+    bool valid_methods = false;
+    for (const auto& rm : routing_methods) {
+      // true => can use held routing method
+      if (rm.get().check_method(mapping_frontier, this->architecture_)) {
+        valid_methods = true;
+        rm.get().routing_method(mapping_frontier, this->architecture_);
+        break;
+      }
+    }
+    if (!valid_methods) {
+      throw MappingManagerError(
+          "No RoutingMethod suitable to map given subcircuit.");
+    }
+    // find next routed/unrouted boundary given updates
+    mapping_frontier->advance_frontier_boundary(this->architecture_);
+  }
+  return circuit_modified;
+}
+}  // namespace tket
\ No newline at end of file
diff --git a/tket/src/Mapping/MappingManager.hpp b/tket/src/Mapping/MappingManager.hpp
new file mode 100644
index 0000000000..2db966512e
--- /dev/null
+++ b/tket/src/Mapping/MappingManager.hpp
@@ -0,0 +1,50 @@
+#ifndef _TKET_MappingManager_H_
+#define _TKET_MappingManager_H_
+
+#include "Architecture/Architectures.hpp"
+#include "Circuit/Circuit.hpp"
+#include "Mapping/RoutingMethod.hpp"
+#include "Utils/UnitID.hpp"
+
+namespace tket {
+
+// list of error types to throw out
+class MappingManagerError : public std::logic_error {
+ public:
+  explicit MappingManagerError(const std::string& message)
+      : std::logic_error(message) {}
+};
+
+typedef ArchitecturePtr ArchitecturePtr;
+
+class MappingManager {
+ public:
+ /* Mapping Manager Constructor */ + // MappingManager object defined by Architecture initialised with + MappingManager(const ArchitecturePtr& _architecture); + + /** + * route_circuit + * Referenced Circuit modified such that all multi-qubit gates are permitted + * by this->architecture_ RoutingIncompability thrown if Circuit has more + * logical qubits than Architecture has physical qubits RoutingIncompability + * thrown if Circuit has a gate of OpType not in Architecture's permitted + * OpTypes + * + * @param circuit Circuit to be routed + * @param routing_methods Ranked RoutingMethod objects to use for routing + * segments. + * + * @return True if circuit is modified + */ + bool route_circuit( + Circuit& circuit, + const std::vector>& routing_methods) + const; + + private: + ArchitecturePtr architecture_; +}; +} // namespace tket + +#endif \ No newline at end of file diff --git a/tket/src/Mapping/RoutingMethod.hpp b/tket/src/Mapping/RoutingMethod.hpp new file mode 100644 index 0000000000..dc96acf938 --- /dev/null +++ b/tket/src/Mapping/RoutingMethod.hpp @@ -0,0 +1,52 @@ +#ifndef _TKET_RoutingMethod_H_ +#define _TKET_RoutingMethod_H_ + +#include "Mapping/MappingFrontier.hpp" + +namespace tket { + +class RoutingMethod { + public: + RoutingMethod(){}; + virtual ~RoutingMethod() {} + /** + * check_method returns true if held method can route given circuit. + * This is completed by converting boundary subcircuit to a Circuit object + * which is then passed to check_subcircuit_ as defined in constructor. + * + * Overloded parameter mapping_frontier contains boundary of gates to be + * checked for method. + * Overloaded parameter architecture is the architecture method works with + * if permitted. + * @return true if method can route subcircuit, false if not + */ + virtual bool check_method( + const std::shared_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const { + return false; + } + + /** + * routing_method modifies circuit held in mapping_frontier with gates for the + * purpose of moving circuit closer to one physically permitted by given + * architecture. Returns new initial mapping of qubits incase permutation via + * swap network is then required, or new ancilla qubits are added. + * This is completed by converting boundaty subcircuit in mapping frontier to + * a Circuit object which is then passed to route_subcircuit_ as defined in + * the constructor. + * + * Overloaded parameter mapping_frontier contains boundary of routed/unrouted + * circuit for modifying. + * Overloaded parameter architecture provides physical constraints + * @return Logical to Physical mapping at boundary due to modification. 
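+   *
+   * Subclasses (LexiRouteRoutingMethod, RoutingMethodCircuit) override both
+   * virtual methods. A skeleton of a custom method, as a sketch only:
+   *
+   *   class MyRoutingMethod : public RoutingMethod {
+   *    public:
+   *     bool check_method(
+   *         const std::shared_ptr<MappingFrontier>& mf,
+   *         const ArchitecturePtr& arc) const override {
+   *       return true;  // accept every subcircuit
+   *     }
+   *     unit_map_t routing_method(
+   *         std::shared_ptr<MappingFrontier>& mf,
+   *         const ArchitecturePtr& arc) const override {
+   *       // modify mf->circuit_ here (e.g. via mf->add_swap) and return any
+   *       // relabelling of logical to physical qubits
+   *       return {};
+   *     }
+   *   };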
+ * + */ + virtual unit_map_t routing_method( + std::shared_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const { + return {}; + } +}; +} // namespace tket + +#endif \ No newline at end of file diff --git a/tket/src/Mapping/RoutingMethodCircuit.cpp b/tket/src/Mapping/RoutingMethodCircuit.cpp new file mode 100644 index 0000000000..9928b97ab8 --- /dev/null +++ b/tket/src/Mapping/RoutingMethodCircuit.cpp @@ -0,0 +1,68 @@ +#include "RoutingMethodCircuit.hpp" + +namespace tket { + +RoutingMethodCircuit::RoutingMethodCircuit( + const std::function( + const Circuit&, const ArchitecturePtr&)> + _route_subcircuit, + const std::function + _check_subcircuit, + unsigned _max_size, unsigned _max_depth) + : route_subcircuit_(_route_subcircuit), + check_subcircuit_(_check_subcircuit), + max_size_(_max_size), + max_depth_(_max_depth){}; + +bool RoutingMethodCircuit::check_method( + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + // Get circuit, pass to held check method + Subcircuit frontier_subcircuit = mapping_frontier->get_frontier_subcircuit( + this->max_depth_, this->max_size_); + Circuit frontier_circuit = + mapping_frontier->circuit_.subcircuit(frontier_subcircuit); + frontier_circuit.rename_units( + mapping_frontier->get_default_to_quantum_boundary_unit_map()); + + return this->check_subcircuit_(frontier_circuit, architecture); +} + +unit_map_t RoutingMethodCircuit::routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + // Produce subcircuit and circuit + Subcircuit frontier_subcircuit = mapping_frontier->get_frontier_subcircuit( + this->max_depth_, this->max_size_); + Circuit frontier_circuit = + mapping_frontier->circuit_.subcircuit(frontier_subcircuit); + frontier_circuit.rename_units( + mapping_frontier->get_default_to_quantum_boundary_unit_map()); + + // get routed subcircuit + std::tuple routed_subcircuit = + this->route_subcircuit_(frontier_circuit, architecture); + unit_map_t new_labelling = std::get<1>(routed_subcircuit); + + // update unit id at boundary in case of relabelling + mapping_frontier->update_quantum_boundary_uids(new_labelling); + + unit_map_t swap_permutation; + for (const auto& pair : new_labelling) { + if (pair.first != pair.second && + architecture->uid_exists(Node(pair.first))) { + swap_permutation.insert(pair); + } + } + // permute edges held by unitid at out boundary due to swaps + mapping_frontier->permute_subcircuit_q_out_hole( + std::get<2>(routed_subcircuit), frontier_subcircuit); + + // substitute old boundary with new cirucit + std::get<0>(routed_subcircuit).flatten_registers(); + mapping_frontier->circuit_.substitute( + std::get<0>(routed_subcircuit), frontier_subcircuit); + // return initial unit_map_t incase swap network required + return swap_permutation; +} +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/RoutingMethodCircuit.hpp b/tket/src/Mapping/RoutingMethodCircuit.hpp new file mode 100644 index 0000000000..3c37d0883b --- /dev/null +++ b/tket/src/Mapping/RoutingMethodCircuit.hpp @@ -0,0 +1,58 @@ +#ifndef _TKET_RoutingMethodCircuit_H_ +#define _TKET_RoutingMethodCircuit_H_ + +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class RoutingMethodCircuit : public RoutingMethod { + public: + virtual ~RoutingMethodCircuit() {} + /** + * RoutingMethodCircuit objects hold methods for partially routing subcircuits + * in the incremental routing of full circuits. 
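+   *
+   * For example, the constructor can be given lambdas; the route function is
+   * expected to return the routed Circuit together with an initial
+   * relabelling and the final qubit permutation (a signature sketch only,
+   * with placeholder return values):
+   *
+   *   RoutingMethodCircuit rmc(
+   *       [](const Circuit& c, const ArchitecturePtr&) {
+   *         return std::make_tuple(c, unit_map_t{}, unit_map_t{});
+   *       },
+   *       [](const Circuit&, const ArchitecturePtr&) { return true; },
+   *       10, 10);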
+ * + * @param _route_subcircuit Function ptr for partial routing method + * @param _check_subcircuit Function ptr for confirming if method sufficient + * @param _max_size Max number of gates in partial routing circuit + * @param _max_depth Max depth of partial routing circuit + */ + RoutingMethodCircuit( + const std::function( + const Circuit&, const ArchitecturePtr&)> + _route_subcircuit, + const std::function + _check_subcircuit, + unsigned _max_size, unsigned _max_depth); + + /** + * @param mapping_frontier Contains boundary of gates to be checked for method + * @param architecture Architecture method would work with if permitted + * @return true if method can route subcircuit, false if not + */ + bool check_method( + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const; + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. + * + */ + unit_map_t routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const; + + private: + const std::function( + const Circuit&, const ArchitecturePtr&)> + route_subcircuit_; + const std::function + check_subcircuit_; + unsigned max_size_, max_depth_; +}; +} // namespace tket + +#endif diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp new file mode 100644 index 0000000000..412d78f32c --- /dev/null +++ b/tket/tests/test_LexiRoute.cpp @@ -0,0 +1,441 @@ +#include + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Predicates/CompilationUnit.hpp" +#include "Predicates/CompilerPass.hpp" +#include "Predicates/PassGenerators.hpp" +#include "Predicates/PassLibrary.hpp" +#include "Routing/Routing.hpp" + +namespace tket { +SCENARIO("Test LexiRoute::solve") { + std::vector nodes = {Node("test_node", 0), Node("test_node", 1), + Node("test_node", 2), Node("node_test", 3), + Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7)}; + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Single best solution, all qubits labelled.") { + Circuit circ(6); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_op(OpType::CX, {qubits[4], qubits[5]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[6]}, {qubits[5], nodes[5]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + + lr.solve(4); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 4); + Command swap_c = commands[1]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } + GIVEN("Single best solution, one qubit unlabelled.") { + Circuit circ(6); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + 
circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_op(OpType::CX, {qubits[4], qubits[5]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[5], nodes[5]}}; + circ.rename_units(rename_map); + std::shared_ptr mf0 = + std::make_shared(circ); + LexiRoute lr(shared_arc, mf0); + + lr.solve(4); + + REQUIRE(mf0->circuit_.n_gates() == 3); + + rename_map = {{qubits[4], nodes[6]}}; + mf0->circuit_.rename_units(rename_map); + + std::shared_ptr mf1 = + std::make_shared(circ); + LexiRoute lr1(shared_arc, mf1); + lr1.solve(4); + std::vector commands = mf1->circuit_.get_commands(); + Command swap_c = commands[1]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } + GIVEN("Single best solution, one stage of look-ahead required.") { + Circuit circ(8); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[2], qubits[7]}); + // n7 + // | + // n0 -- n1 -- n2 -- n3 -- n4 + // | + // n5 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + + lr.solve(4); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 4); + Command swap_c = commands[0]; + unit_vector_t uids = {nodes[7], nodes[3]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + Command changed_c = commands[3]; + uids = {nodes[2], nodes[3]}; + REQUIRE(changed_c.get_args() == uids); + } + GIVEN("All unlabelled, labelling can give complete solution.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[0], qubits[3]}); + circ.add_op(OpType::CX, {qubits[3], qubits[4]}); + + std::shared_ptr mf = + std::make_shared(circ); + LexiRoute lr0(shared_arc, mf); + lr0.solve(20); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 4); + Command c = commands[0]; + unit_vector_t uids = {nodes[2], nodes[1]}; + REQUIRE(c.get_args() == uids); + mf->advance_frontier_boundary(shared_arc); + + LexiRoute lr1(shared_arc, mf); + lr1.solve(20); + uids = {nodes[2], nodes[3]}; + REQUIRE(mf->circuit_.get_commands()[1].get_args() == uids); + mf->advance_frontier_boundary(shared_arc); + + LexiRoute lr2(shared_arc, mf); + lr2.solve(20); + uids = {nodes[2], nodes[5]}; + REQUIRE(mf->circuit_.get_commands()[2].get_args() == uids); + mf->advance_frontier_boundary(shared_arc); + + LexiRoute lr3(shared_arc, mf); + lr3.solve(20); + uids = {nodes[5], nodes[6]}; + REQUIRE(mf->circuit_.get_commands()[3].get_args() == uids); + } + GIVEN("Bridge preferred, CX.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + 
{qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + Command bridge_c = mf->circuit_.get_commands()[0]; + unit_vector_t uids = {nodes[1], nodes[2], nodes[3]}; + REQUIRE(bridge_c.get_args() == uids); + REQUIRE(*bridge_c.get_op_ptr() == *get_op_ptr(OpType::BRIDGE)); + } + GIVEN("Bridge preferred, CZ.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + {qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + REQUIRE(mf->circuit_.get_commands().size() == 4); + } + GIVEN("Ancilla assignment and then merge preferred.") { + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4)}; + // just a ring + + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[4], nodes[0]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + std::map rename_map = { + {qubits[0], nodes[2]}, {qubits[1], nodes[4]}}; + circ.rename_units(rename_map); + + std::shared_ptr mf = + std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr0(shared_arc, mf); + lr0.solve(20); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr1(shared_arc, mf); + lr1.solve(20); + } +} +SCENARIO("Test LexiRouteRoutingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7), Node("node_test", 8), + Node("node_test", 9), Node("node_test", 10)}; + // n9 -- n8 -- n10 + // | | + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}, + {nodes[2], nodes[8]}, + {nodes[8], nodes[9]}, + {nodes[8], nodes[10]}, + {nodes[3], nodes[10]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Circuit with all qubits, labelled, stage 0.") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], 
nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + circ.rename_units(rename_map); + + std::shared_ptr mf = + std::make_shared(circ); + LexiRouteRoutingMethod lrrm(100); + REQUIRE(lrrm.check_method(mf, shared_arc)); + + unit_map_t init_map = lrrm.routing_method(mf, shared_arc); + REQUIRE(init_map.size() == 0); + + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 9); + Command bridge_c = commands[2]; + unit_vector_t uids = {nodes[5], nodes[2], nodes[8]}; + REQUIRE(bridge_c.get_args() == uids); + REQUIRE(*bridge_c.get_op_ptr() == *get_op_ptr(OpType::BRIDGE)); + } + GIVEN("Circuit with all qubits, labelled, stage 1.") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + // n9 -- n8 -- n3 + // | | + // n0 -- n1 -- n2 -- n10 -- n4 + // | | + // n6 n7 + // | + // n5 + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[6]}, + {qubits[6], nodes[5]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + circ.rename_units(rename_map); + + std::shared_ptr mf = + std::make_shared(circ); + LexiRouteRoutingMethod lrrm(100); + unit_map_t init_map = lrrm.routing_method(mf, shared_arc); + REQUIRE(init_map.size() == 0); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 10); + Command swap_c = commands[0]; + unit_vector_t uids = {nodes[3], nodes[10]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } +} +SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { + GIVEN("11 Node Architecture, 11 Qubit circuit, multiple SWAP required.") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7), Node("node_test", 8), + Node("node_test", 9), Node("node_test", 10)}; + // n9 -- n8 -- n10 + // | | + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}, + {nodes[2], nodes[8]}, + {nodes[8], nodes[9]}, + {nodes[8], nodes[10]}, + {nodes[3], nodes[10]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + for (unsigned i = 0; i < 10; i++) { + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[2], qubits[8]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + 
circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + } + + Circuit copy_circ(circ); + // transform stuff + PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(architecture, false); + + MappingManager mm(shared_arc); + LexiRouteRoutingMethod lrrm(100); + std::shared_ptr mf = + std::make_shared(copy_circ); + + std::vector> vrm = {lrrm}; + REQUIRE(vrm[0].get().check_method(mf, shared_arc)); + + bool res = mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu0(circ, preds); + dec->apply(cu0); + REQUIRE(res); + REQUIRE(cu0.check_all_predicates()); + } + GIVEN("Square Grid Architecture, large number of gates.") { + SquareGrid sg(5, 10); + ArchitecturePtr shared_arc = std::make_shared(sg); + Circuit circ(35); + std::vector qubits = circ.all_qubits(); + for (unsigned i = 0; i < qubits.size() - 1; i++) { + circ.add_op(OpType::CX, {qubits[i], qubits[i + 1]}); + } + for (unsigned i = 0; i < qubits.size() - 2; i++) { + circ.add_op(OpType::CZ, {qubits[i], qubits[i + 2]}); + } + // transform stuff + PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(sg, false); + + MappingManager mm(shared_arc); + LexiRouteRoutingMethod lrrm(100); + std::vector> vrm = {lrrm}; + bool res = mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = std::make_shared(sg); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + dec->apply(cu); + REQUIRE(res); + REQUIRE(cu.check_all_predicates()); + REQUIRE(circ.n_gates() == 88); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_LexicographicalComparison.cpp b/tket/tests/test_LexicographicalComparison.cpp new file mode 100644 index 0000000000..fc421d2202 --- /dev/null +++ b/tket/tests/test_LexicographicalComparison.cpp @@ -0,0 +1,211 @@ +#include +#include +#include +#include + +#include "Mapping/LexicographicalComparison.hpp" + +namespace tket { + +SCENARIO("Test LexicographicalComparison::LexicographicalComparison") { + GIVEN("Five Node Architecture, interacting nodes all in architecture.") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("test_node", 3), Node("test_node", 4)}; + // n0 -- n1 -- n2 + // | + // n3 + // | + // n4 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[1], nodes[3]}, + {nodes[3], nodes[4]}}); + interacting_nodes_t interacting_nodes = { + {nodes[0], nodes[3]}, + {nodes[3], nodes[0]}, + {nodes[2], nodes[4]}, + {nodes[4], nodes[2]}}; + + ArchitecturePtr sc = std::make_shared(architecture); + + LexicographicalComparison lc_test(sc, interacting_nodes); + + lexicographical_distances_t distances = + lc_test.get_lexicographical_distances(); + REQUIRE(distances.size() == 3); + REQUIRE(distances[0] == 2); + REQUIRE(distances[1] == 2); + REQUIRE(distances[2] == 0); + } + GIVEN("Three Node architecture, some interacting node not in architecture.") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2)}; + Architecture architecture({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}}); + ArchitecturePtr sa = std::make_shared(architecture); + interacting_nodes_t interacting_nodes = { + {nodes[0], Node("bad_node", 4)}, {Node("test_node", 3), nodes[0]}}; + REQUIRE_THROWS_AS( + LexicographicalComparison(sa, interacting_nodes), + LexicographicalComparisonError); 
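+    // Descriptive note (inferred from the surrounding test): interactions that
+    // reference nodes absent from the architecture have no defined distance,
+    // so constructing the LexicographicalComparison is expected to throw
+    // rather than silently produce a distance vector.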
+ } +} + +SCENARIO("Test LexicographicalComparison::increment_distances") { + GIVEN("Three Node Architecture, varying standard increments.") { + std::vector nodes = {Node(0), Node(1), Node(2)}; + Architecture architecture({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}}); + interacting_nodes_t interactions = { + {nodes[0], nodes[2]}, {nodes[2], nodes[0]}}; + ArchitecturePtr sa = std::make_shared(architecture); + LexicographicalComparison lc_test(sa, interactions); + + lexicographical_distances_t distances = + lc_test.get_lexicographical_distances(); + REQUIRE(distances[0] == 2); + REQUIRE(distances[1] == 0); + + std::pair interaction = {nodes[0], nodes[2]}; + lc_test.increment_distances(distances, interaction, -2); + REQUIRE(distances[0] == 0); + REQUIRE(distances[1] == 0); + + REQUIRE_THROWS_AS( + lc_test.increment_distances(distances, interaction, -2), + LexicographicalComparisonError); + + interaction = {nodes[1], nodes[0]}; + lc_test.increment_distances(distances, interaction, 2); + REQUIRE(distances[0] == 0); + REQUIRE(distances[1] == 2); + } +} + +SCENARIO( + "Test LexicographicalComparison::get_updated_distances, five node " + "architecture") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("test_node", 3), Node("test_node", 4)}; + // n0 -- n1 -- n2 + // | + // n3 + // | + // n4 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[1], nodes[3]}, + {nodes[3], nodes[4]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + interacting_nodes_t interacting_nodes = { + {nodes[0], nodes[3]}, + {nodes[3], nodes[0]}, + {nodes[2], nodes[4]}, + {nodes[4], nodes[2]}}; + + LexicographicalComparison lc_test(shared_arc, interacting_nodes); + GIVEN("Two identical legal swap, one node in interaction.") { + swap_t swap_12 = {nodes[1], nodes[2]}; + swap_t swap_21 = {nodes[1], nodes[2]}; + lexicographical_distances_t distances_12 = + lc_test.get_updated_distances(swap_12); + REQUIRE(distances_12.size() == 3); + REQUIRE(distances_12[0] == 0); + REQUIRE(distances_12[1] == 4); + REQUIRE(distances_12[2] == 0); + REQUIRE(distances_12 == lc_test.get_updated_distances(swap_21)); + } + GIVEN("Two identical legal swap, both node in interaction.") { + swap_t swap_34 = {nodes[3], nodes[4]}; + swap_t swap_43 = {nodes[4], nodes[3]}; + lexicographical_distances_t distances_34 = + lc_test.get_updated_distances(swap_34); + REQUIRE(distances_34.size() == 3); + REQUIRE(distances_34[0] == 2); + REQUIRE(distances_34[1] == 2); + REQUIRE(distances_34[2] == 0); + REQUIRE(distances_34 == lc_test.get_updated_distances(swap_43)); + } + GIVEN("Illegal swap.") { + // illegal swap -> as Node not in architecture will return unchanged + swap_t swap_illegal = {Node("bad_node", 0), Node("bad_node", 9)}; + lexicographical_distances_t distances_illegal = + lc_test.get_updated_distances(swap_illegal); + REQUIRE(distances_illegal == lc_test.get_lexicographical_distances()); + } + GIVEN("Swap between two qubits in already adjacent interaction.") { + interacting_nodes_t interacting = { + {nodes[0], nodes[1]}, {nodes[3], nodes[4]}}; + LexicographicalComparison lc_in(shared_arc, interacting); + swap_t swap_01 = {nodes[0], nodes[1]}; + swap_t swap_10 = {nodes[1], nodes[0]}; + swap_t swap_34 = {nodes[3], nodes[4]}; + swap_t swap_43 = {nodes[4], nodes[3]}; + lexicographical_distances_t distances_01 = + lc_in.get_updated_distances(swap_01); + lexicographical_distances_t distances_10 = + lc_in.get_updated_distances(swap_10); + 
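+    // Both interacting pairs, {n0, n1} and {n3, n4}, are already adjacent on
+    // this architecture, so none of the swaps below should change any entry
+    // of the lexicographical distance vector.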
lexicographical_distances_t distances_34 = + lc_in.get_updated_distances(swap_34); + lexicographical_distances_t distances_43 = + lc_in.get_updated_distances(swap_43); + lexicographical_distances_t base_distances = + lc_in.get_lexicographical_distances(); + lexicographical_distances_t comp = {0, 0, 4}; + REQUIRE(base_distances == comp); + REQUIRE(distances_01 == base_distances); + REQUIRE(distances_10 == base_distances); + REQUIRE(distances_34 == base_distances); + REQUIRE(distances_43 == base_distances); + } +} + +SCENARIO("Test LexicographicalComparison::remove_swaps_lexicographical") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("test_node", 3), Node("test_node", 4)}; + // n0 -- n1 -- n2 + // | + // n3 + // | + // n4 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[1], nodes[3]}, + {nodes[3], nodes[4]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + interacting_nodes_t interacting_nodes = { + {nodes[0], nodes[3]}, + {nodes[3], nodes[0]}, + {nodes[2], nodes[4]}, + {nodes[4], nodes[2]}}; + + LexicographicalComparison lc_test(shared_arc, interacting_nodes); + GIVEN("Single Swap.") { + swap_t swap_01 = {nodes[0], nodes[1]}; + swap_set_t candidate_swaps = {swap_01}; + lc_test.remove_swaps_lexicographical(candidate_swaps); + REQUIRE(candidate_swaps.size() == 1); + REQUIRE(*candidate_swaps.begin() == swap_01); + } + GIVEN("Two Swap, both identical.") { + swap_t swap_01 = {nodes[0], nodes[1]}; + swap_t swap_10 = {nodes[1], nodes[0]}; + swap_set_t candidate_swaps = {swap_01, swap_10}; + lc_test.remove_swaps_lexicographical(candidate_swaps); + REQUIRE(candidate_swaps.size() == 2); + } + GIVEN("Swap on all edges.") { + swap_t swap_01 = {nodes[0], nodes[1]}; + swap_t swap_12 = {nodes[1], nodes[2]}; + swap_t swap_13 = {nodes[1], nodes[3]}; + swap_t swap_34 = {nodes[3], nodes[4]}; + swap_set_t candidate_swaps = {swap_01, swap_12, swap_13, swap_34}; + lc_test.remove_swaps_lexicographical(candidate_swaps); + REQUIRE(candidate_swaps.size() == 1); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp new file mode 100644 index 0000000000..0cc0e8caed --- /dev/null +++ b/tket/tests/test_MappingFrontier.cpp @@ -0,0 +1,695 @@ +#include +#include +#include +#include + +#include "Mapping/MappingManager.hpp" + +namespace tket { + +SCENARIO("Test MappingFrontier initialisation, advance_frontier_boundary.") { + GIVEN("A typical Circuit and Architecture with uninitialised boundary") { + Circuit circ; + circ.add_q_register("test_nodes", 4); + + std::vector qubits = circ.all_qubits(); + + Vertex v1 = circ.add_op(OpType::X, {qubits[0]}); + Vertex v8 = circ.add_op(OpType::S, {qubits[3]}); + Vertex v9 = circ.add_op(OpType::T, {qubits[3]}); + Vertex v2 = circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + Vertex v3 = circ.add_op(OpType::CY, {qubits[2], qubits[3]}); + Vertex v4 = circ.add_op(OpType::H, {qubits[0]}); + Vertex v5 = circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + Vertex v6 = circ.add_op(OpType::Y, {qubits[0]}); + Vertex v7 = circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + 
circ.rename_units(rename_map); + + MappingFrontier mf(circ); + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + VertPort vp1 = mf.quantum_boundary->get().find(nodes[1])->second; + VertPort vp2 = mf.quantum_boundary->get().find(nodes[2])->second; + VertPort vp3 = mf.quantum_boundary->get().find(nodes[3])->second; + + Edge e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + Edge e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + Edge e3 = mf.circuit_.get_nth_out_edge(vp3.first, vp3.second); + + REQUIRE(mf.circuit_.source(e0) == v4); + REQUIRE(mf.circuit_.target(e0) == v5); + REQUIRE(mf.circuit_.source(e1) == v2); + REQUIRE(mf.circuit_.target(e1) == v7); + REQUIRE( + mf.circuit_.get_OpType_from_Vertex(mf.circuit_.source(e2)) == + OpType::Input); + REQUIRE(mf.circuit_.target(e2) == v3); + REQUIRE(mf.circuit_.source(e3) == v9); + REQUIRE(mf.circuit_.target(e3) == v3); + + mf.advance_frontier_boundary(shared_arc); + // check that advance_frontier_boundary doesn't incorrectly move boundary + // forwards + vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + vp1 = mf.quantum_boundary->get().find(nodes[1])->second; + vp2 = mf.quantum_boundary->get().find(nodes[2])->second; + vp3 = mf.quantum_boundary->get().find(nodes[3])->second; + + e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + e3 = mf.circuit_.get_nth_out_edge(vp3.first, vp3.second); + + REQUIRE(mf.circuit_.source(e0) == v4); + REQUIRE(mf.circuit_.target(e0) == v5); + REQUIRE(mf.circuit_.source(e1) == v2); + REQUIRE(mf.circuit_.target(e1) == v7); + REQUIRE( + mf.circuit_.get_OpType_from_Vertex(mf.circuit_.source(e2)) == + OpType::Input); + REQUIRE(mf.circuit_.target(e2) == v3); + REQUIRE(mf.circuit_.source(e3) == v9); + REQUIRE(mf.circuit_.target(e3) == v3); + } +} + +SCENARIO("Test MappingFrontier get_default_to_quantum_boundary_unit_map") { + Circuit circ; + circ.add_q_register("test_nodes", 4); + std::vector qubits = circ.all_qubits(); + MappingFrontier mf(circ); + unit_map_t d_2_q = mf.get_default_to_quantum_boundary_unit_map(); + REQUIRE(d_2_q[Qubit(0)] == qubits[0]); + REQUIRE(d_2_q[Qubit(1)] == qubits[1]); + REQUIRE(d_2_q[Qubit(2)] == qubits[2]); + REQUIRE(d_2_q[Qubit(3)] == qubits[3]); +} + +SCENARIO("Test MappingFrontier get_frontier_subcircuit.") { + GIVEN( + "A typical circuit, MappingFrontier with depth 1 and depth 3 " + "subcircuit returns, no renaming units.") { + Circuit circ; + circ.add_q_register("test_nodes", 4); + std::vector qubits = circ.all_qubits(); + + Vertex v1 = circ.add_op(OpType::X, {qubits[0]}); + Vertex v8 = circ.add_op(OpType::S, {qubits[3]}); + Vertex v9 = circ.add_op(OpType::T, {qubits[3]}); + Vertex v2 = circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + Vertex v3 = circ.add_op(OpType::CY, {qubits[2], qubits[3]}); + Vertex v4 = circ.add_op(OpType::H, {qubits[0]}); + Vertex v5 = circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + Vertex v6 = circ.add_op(OpType::Y, {qubits[0]}); + Vertex v7 = circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + 
{qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf_1(circ); + MappingFrontier mf_3(circ); + + mf_1.advance_frontier_boundary(shared_arc); + Subcircuit sc1 = mf_1.get_frontier_subcircuit(1, 7); + mf_3.advance_frontier_boundary(shared_arc); + Subcircuit sc3 = mf_3.get_frontier_subcircuit(3, 7); + + Circuit frontier_circuit_1 = mf_1.circuit_.subcircuit(sc1); + + Circuit comparison_circuit(4); + comparison_circuit.add_op(OpType::CY, {2, 3}); + REQUIRE(frontier_circuit_1 == comparison_circuit); + + Circuit frontier_circuit_3 = mf_3.circuit_.subcircuit(sc3); + comparison_circuit.add_op(OpType::CZ, {0, 2}); + comparison_circuit.add_op(OpType::Y, {0}); + comparison_circuit.add_op(OpType::CX, {3, 1}); + REQUIRE(frontier_circuit_3 == comparison_circuit); + } + + GIVEN( + "A typical circuit but with non-contiguous Qubit Labelling. " + "MappingFrontier with depth 1 and depth 3 " + "subcircuit returns, no renaming units.") { + Circuit circ(4); + Qubit q0("label_0", 1); + Qubit q1("label_1", 3); + Qubit q2("label_2", 0); + Qubit q3("label_3", 2); + std::vector qubits = {q0, q1, q2, q3}; + std::map new_units = { + {Qubit(0), q0}, {Qubit(1), q1}, {Qubit(2), q2}, {Qubit(3), q3}}; + circ.rename_units(new_units); + + Vertex v1 = circ.add_op(OpType::X, {qubits[0]}); + Vertex v8 = circ.add_op(OpType::S, {qubits[3]}); + Vertex v9 = circ.add_op(OpType::T, {qubits[3]}); + Vertex v2 = circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + Vertex v3 = circ.add_op(OpType::CY, {qubits[2], qubits[3]}); + Vertex v4 = circ.add_op(OpType::H, {qubits[0]}); + Vertex v5 = circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + Vertex v6 = circ.add_op(OpType::Y, {qubits[0]}); + Vertex v7 = circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + + circ.rename_units(rename_map); + + MappingFrontier mf_1(circ); + MappingFrontier mf_3(circ); + + mf_1.advance_frontier_boundary(shared_arc); + Subcircuit sc1 = mf_1.get_frontier_subcircuit(1, 7); + mf_3.advance_frontier_boundary(shared_arc); + Subcircuit sc3 = mf_3.get_frontier_subcircuit(3, 7); + + Circuit frontier_circuit_1 = mf_1.circuit_.subcircuit(sc1); + + frontier_circuit_1.rename_units( + mf_1.get_default_to_quantum_boundary_unit_map()); + Circuit comparison_circuit(4); + std::map rename_map_default = { + {Qubit(0), nodes[0]}, + {Qubit(1), nodes[1]}, + {Qubit(2), nodes[2]}, + {Qubit(3), nodes[3]}}; + comparison_circuit.rename_units(rename_map_default); + comparison_circuit.add_op(OpType::CY, {nodes[2], nodes[3]}); + REQUIRE(frontier_circuit_1 == comparison_circuit); + Circuit frontier_circuit_3 = mf_3.circuit_.subcircuit(sc3); + frontier_circuit_3.rename_units( + mf_3.get_default_to_quantum_boundary_unit_map()); + + comparison_circuit.add_op(OpType::CZ, {nodes[0], nodes[2]}); + comparison_circuit.add_op(OpType::Y, {nodes[0]}); + comparison_circuit.add_op(OpType::CX, {nodes[3], nodes[1]}); + REQUIRE(frontier_circuit_3 == comparison_circuit); + } +} + +SCENARIO("Test update_quantum_boundary_uids.") { + Circuit circ(10); + std::vector qbs = circ.all_qubits(); + MappingFrontier mf(circ); + GIVEN("Empty relabelling.") { mf.update_quantum_boundary_uids({}); } + GIVEN("Relabel some qubits to same qubit.") { + mf.update_quantum_boundary_uids( + 
{{qbs[0], qbs[0]}, {qbs[2], qbs[2]}, {qbs[7], qbs[7]}}); + REQUIRE(mf.quantum_boundary->get().find(qbs[0])->first == qbs[0]); + REQUIRE(mf.quantum_boundary->get().find(qbs[2])->first == qbs[2]); + REQUIRE(mf.quantum_boundary->get().find(qbs[7])->first == qbs[7]); + } + GIVEN("Relabel to already present qubit, check boundary has qubit removed.") { + mf.update_quantum_boundary_uids({{qbs[0], qbs[1]}}); + REQUIRE(mf.quantum_boundary->get().size() == 9); + } + GIVEN("Relabel to new UnitID.") { + mf.update_quantum_boundary_uids({{qbs[0], Node("tn", 6)}}); + REQUIRE( + mf.quantum_boundary->get().find(qbs[0]) == + mf.quantum_boundary->get().end()); + } +} + +SCENARIO("Test permute_subcircuit_q_out_hole.") { + GIVEN("A four qubit subcircuit where every qubit is permuted by given map.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + Vertex v1 = circ.add_op(OpType::X, {q0}); + Vertex v2 = circ.add_op(OpType::CX, {q0, q1}); + Vertex v3 = circ.add_op(OpType::CY, {q2, q3}); + Vertex v5 = circ.add_op(OpType::CZ, {q0, q2}); + Vertex v7 = circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + // assume only 1 subcircuit + EdgeVec original_q_out = sc.q_out_hole; + + unit_map_t permutation = { + {nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[0]}}; + mf.permute_subcircuit_q_out_hole(permutation, sc); + + EdgeVec permuted_q_out = sc.q_out_hole; + + REQUIRE(original_q_out[1] == permuted_q_out[0]); + REQUIRE(original_q_out[2] == permuted_q_out[1]); + REQUIRE(original_q_out[3] == permuted_q_out[2]); + REQUIRE(original_q_out[0] == permuted_q_out[3]); + } + GIVEN("A four qubit subcircuit with a partial permutation.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + Vertex v1 = circ.add_op(OpType::X, {q0}); + Vertex v2 = circ.add_op(OpType::CX, {q0, q1}); + Vertex v3 = circ.add_op(OpType::CY, {q2, q3}); + Vertex v5 = circ.add_op(OpType::CZ, {q0, q2}); + Vertex v7 = circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + // assume only 1 subcircuit + EdgeVec original_q_out = sc.q_out_hole; + + unit_map_t permutation = { + {nodes[0], nodes[1]}, + {nodes[1], nodes[0]}, + {nodes[2], nodes[2]}, + {nodes[3], nodes[3]}}; + mf.permute_subcircuit_q_out_hole(permutation, sc); + + EdgeVec permuted_q_out = sc.q_out_hole; + + REQUIRE(original_q_out[1] == permuted_q_out[0]); + REQUIRE(original_q_out[0] == permuted_q_out[1]); + 
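+    // nodes[2] and nodes[3] map to themselves under this partial permutation,
+    // so their q_out_hole edges are expected to stay in their original
+    // positions.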
REQUIRE(original_q_out[2] == permuted_q_out[2]); + REQUIRE(original_q_out[3] == permuted_q_out[3]); + } +} +SCENARIO("Test MappingFrontier::advance_next_2qb_slice") { + std::vector nodes = {Node("test_node", 0), Node("test_node", 1), + Node("test_node", 2), Node("node_test", 3), + Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7)}; + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("One CX to find in next slice.") { + Circuit circ(8); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::X, {qubits[7]}); + circ.add_op(OpType::CX, {qubits[2], qubits[7]}); + // n7 + // | + // n0 -- n1 -- n2 -- n3 -- n4 + // | + // n5 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + // gets to first two cx + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + VertPort vp4 = mf.quantum_boundary->get().find(nodes[4])->second; + VertPort vp6 = mf.quantum_boundary->get().find(nodes[6])->second; + VertPort vp7 = mf.quantum_boundary->get().find(nodes[7])->second; + + Edge e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e4 = mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + Edge e6 = mf.circuit_.get_nth_out_edge(vp6.first, vp6.second); + Edge e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + Vertex v0 = mf.circuit_.target(e0); + Vertex v4 = mf.circuit_.target(e4); + Vertex v6 = mf.circuit_.target(e6); + Vertex v7 = mf.circuit_.target(e7); + + REQUIRE(v0 == v4); + REQUIRE(v6 == v7); + + mf.advance_next_2qb_slice(5); + VertPort vp2 = mf.quantum_boundary->get().find(nodes[2])->second; + vp7 = mf.quantum_boundary->get().find(nodes[7])->second; + + Edge e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + Vertex v2 = mf.circuit_.target(e2); + v7 = mf.circuit_.target(e7); + + REQUIRE(v2 == v7); + } + GIVEN( + "Three CX to find in next slice 1, Two CX and one CZ in next slice 2. 
") { + Circuit circ(8); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[2], qubits[7]}); + circ.add_op(OpType::CX, {qubits[0], qubits[5]}); + circ.add_op(OpType::X, {qubits[0]}); + circ.add_op(OpType::CX, {qubits[4], qubits[1]}); + circ.add_op(OpType::CX, {qubits[2], qubits[0]}); + circ.add_op(OpType::X, {qubits[1]}); + circ.add_op(OpType::CX, {qubits[4], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[7]}); + // n7 + // | + // n0 -- n1 -- n2 -- n3 -- n4 + // | + // n5 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + // gets to first two cx + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + VertPort vp4 = mf.quantum_boundary->get().find(nodes[4])->second; + VertPort vp6 = mf.quantum_boundary->get().find(nodes[6])->second; + VertPort vp7 = mf.quantum_boundary->get().find(nodes[7])->second; + + Edge e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e4 = mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + Edge e6 = mf.circuit_.get_nth_out_edge(vp6.first, vp6.second); + Edge e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + Vertex v0 = mf.circuit_.target(e0); + Vertex v4 = mf.circuit_.target(e4); + Vertex v6 = mf.circuit_.target(e6); + Vertex v7 = mf.circuit_.target(e7); + + // get edges + // then get target... + REQUIRE(v0 == v4); + REQUIRE(v6 == v7); + + mf.advance_next_2qb_slice(1); + vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + VertPort vp1 = mf.quantum_boundary->get().find(nodes[1])->second; + VertPort vp2 = mf.quantum_boundary->get().find(nodes[2])->second; + vp4 = mf.quantum_boundary->get().find(nodes[4])->second; + VertPort vp5 = mf.quantum_boundary->get().find(nodes[5])->second; + vp7 = mf.quantum_boundary->get().find(nodes[7])->second; + + e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + Edge e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + e4 = mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + Edge e5 = mf.circuit_.get_nth_out_edge(vp5.first, vp5.second); + e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + v0 = mf.circuit_.target(e0); + Vertex v1 = mf.circuit_.target(e1); + Vertex v2 = mf.circuit_.target(e2); + v4 = mf.circuit_.target(e4); + Vertex v5 = mf.circuit_.target(e5); + v7 = mf.circuit_.target(e7); + + REQUIRE(v1 == v4); + REQUIRE(v0 == v5); + REQUIRE(v2 == v7); + + mf.advance_next_2qb_slice(1); + vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + vp1 = mf.quantum_boundary->get().find(nodes[1])->second; + vp2 = mf.quantum_boundary->get().find(nodes[2])->second; + VertPort vp3 = mf.quantum_boundary->get().find(nodes[3])->second; + vp4 = mf.quantum_boundary->get().find(nodes[4])->second; + vp7 = mf.quantum_boundary->get().find(nodes[7])->second; + + e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + Edge e3 = mf.circuit_.get_nth_out_edge(vp3.first, vp3.second); + e4 = mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + e7 = 
mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + v0 = mf.circuit_.target(e0); + v1 = mf.circuit_.target(e1); + v2 = mf.circuit_.target(e2); + Vertex v3 = mf.circuit_.target(e3); + v4 = mf.circuit_.target(e4); + v7 = mf.circuit_.target(e7); + + REQUIRE(v0 == v2); + REQUIRE(v1 == v4); + REQUIRE(v3 == v7); + } +} +SCENARIO("Test MappingFrontier::add_qubit") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + mf.add_qubit(nodes[3]); + + REQUIRE(circ.all_qubits().size() == 4); + REQUIRE(mf.circuit_.all_qubits().size() == 4); + REQUIRE(mf.quantum_boundary->size() == 4); + REQUIRE(mf.quantum_boundary->find(nodes[3]) != mf.quantum_boundary->end()); +} + +SCENARIO("Test MappingFrontier::add_swap") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + mf.add_swap(nodes[0], nodes[1]); + + std::vector commands = mf.circuit_.get_commands(); + REQUIRE(commands.size() == 4); + Command swap_c = commands[0]; + unit_vector_t uids = {nodes[0], nodes[1]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + Command cx_c = commands[1]; + uids = {nodes[1], nodes[0]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[2]; + uids = {nodes[0], nodes[2]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[3]; + uids = {nodes[0], nodes[3]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CZ)); + + Node new_node("new_node", 8); + mf.add_swap(nodes[0], new_node); + + commands = mf.circuit_.get_commands(); + REQUIRE(commands.size() == 5); + swap_c = commands[0]; + uids = {nodes[0], nodes[1]}; + + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + swap_c = commands[1]; + uids = {nodes[0], new_node}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + cx_c = commands[2]; + uids = {nodes[1], new_node}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[3]; + uids = {new_node, nodes[2]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[4]; + uids = {new_node, nodes[3]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CZ)); +} +SCENARIO("Test MappingFrontier::add_bridge") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], 
qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + mf.add_bridge(nodes[0], nodes[2], nodes[1]); + + std::vector commands = mf.circuit_.get_commands(); + REQUIRE(commands.size() == 3); + Command bridge_c = commands[0]; + unit_vector_t uids = {nodes[0], nodes[2], nodes[1]}; + REQUIRE(bridge_c.get_args() == uids); + REQUIRE(*bridge_c.get_op_ptr() == *get_op_ptr(OpType::BRIDGE)); + + Command cx_c = commands[1]; + uids = {nodes[1], nodes[2]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[2]; + uids = {nodes[1], nodes[3]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CZ)); +} +SCENARIO("Test MappingFrontier set_quantum_boundary") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + + unit_vertport_frontier_t copy; + for (const std::pair& pair : + mf.quantum_boundary->get()) { + copy.insert({pair.first, pair.second}); + } + + VertPort vp0_c = copy.get().find(nodes[0])->second; + VertPort vp1_c = copy.get().find(nodes[1])->second; + VertPort vp2_c = copy.get().find(nodes[2])->second; + VertPort vp3_c = copy.get().find(nodes[3])->second; + + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0_in = mf.quantum_boundary->get().find(nodes[0])->second; + VertPort vp1_in = mf.quantum_boundary->get().find(nodes[1])->second; + VertPort vp2_in = mf.quantum_boundary->get().find(nodes[2])->second; + VertPort vp3_in = mf.quantum_boundary->get().find(nodes[3])->second; + + REQUIRE(vp0_in.first != vp0_c.first); + REQUIRE(vp1_in.first != vp1_c.first); + REQUIRE(vp2_in.first != vp2_c.first); + REQUIRE(vp3_in.first != vp3_c.first); + + mf.set_quantum_boundary(copy); + + vp0_in = mf.quantum_boundary->get().find(nodes[0])->second; + vp1_in = mf.quantum_boundary->get().find(nodes[1])->second; + vp2_in = mf.quantum_boundary->get().find(nodes[2])->second; + vp3_in = mf.quantum_boundary->get().find(nodes[3])->second; + + REQUIRE(vp0_in.first == vp0_c.first); + REQUIRE(vp1_in.first == vp1_c.first); + REQUIRE(vp2_in.first == vp2_c.first); + REQUIRE(vp3_in.first == vp3_c.first); +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingManager.cpp b/tket/tests/test_MappingManager.cpp new file mode 100644 index 0000000000..e90fa7e5d6 --- /dev/null +++ b/tket/tests/test_MappingManager.cpp @@ -0,0 +1,37 @@ +#include +#include +#include +#include + +#include "Mapping/MappingManager.hpp" + +namespace tket { + +SCENARIO("Test MappingManager::route_circuit") { + Node node0("test_node", 0), node1("test_node", 1), node2("test_node", 2); + Architecture arc({{node0, node1}, {node1, node2}}); + ArchitecturePtr shared_arc 
= std::make_shared<Architecture>(arc);
+  MappingManager test_mm(shared_arc);
+  RoutingMethod test_rm;
+  std::vector> test_vrm = {test_rm};
+  GIVEN("More qubits than architecture has qubits.") {
+    Circuit circ(5);
+    REQUIRE_THROWS_AS(
+        test_mm.route_circuit(circ, test_vrm), MappingManagerError);
+  }
+  GIVEN("Circuit unmodified.") {
+    Circuit circ(2);
+    REQUIRE(!test_mm.route_circuit(circ, test_vrm));
+  }
+  GIVEN("No method can route circuit.") {
+    Circuit circ(3);
+    std::vector<Qubit> qubits = circ.all_qubits();
+    circ.add_op(OpType::CX, {qubits[0], qubits[2]});
+    std::map rename_map = {
+        {qubits[0], node0}, {qubits[1], node1}, {qubits[2], node2}};
+    circ.rename_units(rename_map);
+    REQUIRE_THROWS_AS(
+        test_mm.route_circuit(circ, test_vrm), MappingManagerError);
+  }
+}
+}  // namespace tket
diff --git a/tket/tests/test_RoutingMethod.cpp b/tket/tests/test_RoutingMethod.cpp
new file mode 100644
index 0000000000..8e1d7bc12f
--- /dev/null
+++ b/tket/tests/test_RoutingMethod.cpp
@@ -0,0 +1,191 @@
+#include 
+#include 
+#include 
+#include 
+
+#include "Mapping/MappingFrontier.hpp"
+#include "Mapping/RoutingMethodCircuit.hpp"
+
+namespace tket {
+
+SCENARIO("Test RoutingMethod default methods.") {
+  RoutingMethod rm;
+  Architecture arc(
+      {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}});
+  ArchitecturePtr shared_arc = std::make_shared<Architecture>(arc);
+  Circuit circ(3);
+  std::shared_ptr<MappingFrontier> mf = std::make_shared<MappingFrontier>(circ);
+  REQUIRE(!rm.check_method(mf, shared_arc));
+  unit_map_t empty;
+  REQUIRE(rm.routing_method(mf, shared_arc) == empty);
+}
+
+// These two methods are not completely reflective of what is necessary for
+// routing. Their design is to minimally test the required features of the
+// methods, not to actually successfully route a circuit.
+bool test_check_method(const Circuit& c, const ArchitecturePtr& a) {
+  if (c.n_qubits() > 2 && a->n_uids() > 2) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+std::tuple test_routing_method_mf_swap_perm(
+    const Circuit& c, const ArchitecturePtr& a) {
+  Circuit copy(c);
+  std::vector<Qubit> qs = copy.all_qubits();
+  std::vector ns = a->get_all_uids_vec();
+  // enforce in tests that ns >= qs, this is testing purposes only so fine...
+  unit_map_t rename_map, final_map;
+  for (unsigned i = 0; i < qs.size(); i++) {
+    rename_map.insert({qs[i], ns[i]});
+    final_map.insert({ns[i], ns[i]});
+  }
+  copy.rename_units(rename_map);
+  MappingFrontier mf(copy);
+  // n.b. add_swap permutes out edge of both boundaries,
+  mf.add_swap(Node("t", 0), Node("t", 1));
+
+  return std::make_tuple(copy, rename_map, final_map);
+}
+
+std::tuple test_routing_method_mf_swap_no_perm(
+    const Circuit& c, const ArchitecturePtr& a) {
+  Circuit copy(c);
+  std::vector<Qubit> qs = copy.all_qubits();
+  std::vector ns = a->get_all_uids_vec();
+  // enforce in tests that ns >= qs, this is testing purposes only so fine...
+  unit_map_t rename_map, final_map;
+  for (unsigned i = 0; i < qs.size(); i++) {
+    rename_map.insert({qs[i], ns[i]});
+    final_map.insert({ns[i], ns[i]});
+  }
+  copy.rename_units(rename_map);
+  MappingFrontier mf(copy);
+  // n.b. 
add_swap permutes out edge of both boundaries, + mf.add_swap(Node("t", 0), Node("t", 1)); + final_map[Node("t", 0)] = Node("t", 1); + final_map[Node("t", 1)] = Node("t", 0); + + return std::make_tuple(copy, rename_map, final_map); +} + +std::tuple test_routing_method_circuit_no_perm( + const Circuit& c, const ArchitecturePtr& a) { + Circuit copy(c.n_qubits()); + copy.add_op(OpType::SWAP, {0, 1}); + copy.add_op(OpType::CX, {1, 0}); + copy.add_op(OpType::CX, {1, 0}); + + std::vector qs = copy.all_qubits(); + std::vector ns = a->get_all_uids_vec(); + // enforce in tests that ns >= qs, this is testing purposes only so fine... + unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + MappingFrontier mf(copy); + final_map[Node("t", 0)] = Node("t", 1); + final_map[Node("t", 1)] = Node("t", 0); + return std::make_tuple(copy, rename_map, final_map); +} + +SCENARIO("Test RoutingMethodCircuit::check_method") { + RoutingMethodCircuit rmc( + test_routing_method_mf_swap_no_perm, test_check_method, 5, 5); + Circuit c(2), circ3(3); + c.add_op(OpType::CX, {0, 1}); + circ3.add_op(OpType::CX, {0, 2}); + circ3.add_op(OpType::CX, {2, 1}); + std::shared_ptr mf2 = std::make_shared(c); + std::shared_ptr mf3 = + std::make_shared(circ3); + + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + REQUIRE(!rmc.check_method(mf2, shared_arc)); + REQUIRE(rmc.check_method(mf3, shared_arc)); +} +SCENARIO("Test RoutingMethodCircuit::route_method") { + Circuit comp(2); + comp.add_op(OpType::SWAP, {0, 1}); + comp.add_op(OpType::CX, {1, 0}); + comp.add_op(OpType::CX, {1, 0}); + comp.add_op(OpType::CX, {1, 0}); + comp.add_op(OpType::CX, {1, 0}); + auto qbs = comp.all_qubits(); + unit_map_t rename_map = {{qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}}; + comp.rename_units(rename_map); + qubit_map_t permutation = { + {Node("t", 0), Node("t", 1)}, {Node("t", 1), Node("t", 0)}}; + comp.permute_boundary_output(permutation); + + GIVEN("Non-implicit Permutation method, using MappingFrontier::add_swap") { + RoutingMethodCircuit rmc( + test_routing_method_mf_swap_no_perm, test_check_method, 2, 2); + Circuit c(2); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + + std::shared_ptr mf = std::make_shared(c); + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + unit_map_t output = rmc.routing_method(mf, shared_arc), empty; + REQUIRE(output == empty); + REQUIRE(c == comp); + } + GIVEN("Non-implicit Permutation method, using circuit replacement") { + RoutingMethodCircuit rmc( + test_routing_method_circuit_no_perm, test_check_method, 2, 2); + Circuit c(2); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + + std::shared_ptr mf = std::make_shared(c); + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + unit_map_t output = rmc.routing_method(mf, shared_arc), empty; + REQUIRE(output == empty); + REQUIRE(c == comp); + } + GIVEN("Implicit Permutation method, using MappingFrontier::add_swap") { + RoutingMethodCircuit rmc( + test_routing_method_mf_swap_perm, test_check_method, 2, 2); + Circuit c(2); + 
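+    // Four identical CX gates; comp1 below encodes the expected routed
+    // circuit, in which the first two gates act on the swapped wires and the
+    // remaining two are left on the original wires.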
c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + + std::shared_ptr mf = std::make_shared(c); + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + unit_map_t output = rmc.routing_method(mf, shared_arc), empty; + REQUIRE(output == empty); + + Circuit comp1(2); + comp1.add_op(OpType::SWAP, {0, 1}); + comp1.add_op(OpType::CX, {1, 0}); + comp1.add_op(OpType::CX, {1, 0}); + comp1.add_op(OpType::CX, {0, 1}); + comp1.add_op(OpType::CX, {0, 1}); + qbs = comp1.all_qubits(); + rename_map = {{qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}}; + comp1.rename_units(rename_map); + + REQUIRE(c == comp1); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index bd8a27fbed..b5dd764afc 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -87,6 +87,11 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_Architectures.cpp ${TKET_TESTS_DIR}/test_Placement.cpp ${TKET_TESTS_DIR}/test_Routing.cpp + ${TKET_TESTS_DIR}/test_MappingFrontier.cpp + ${TKET_TESTS_DIR}/test_RoutingMethod.cpp + ${TKET_TESTS_DIR}/test_MappingManager.cpp + ${TKET_TESTS_DIR}/test_LexicographicalComparison.cpp + ${TKET_TESTS_DIR}/test_LexiRoute.cpp ${TKET_TESTS_DIR}/test_DeviceCharacterisation.cpp ${TKET_TESTS_DIR}/test_Clifford.cpp ${TKET_TESTS_DIR}/test_MeasurementSetup.cpp From 5b50029898ef73a0387e77ae14f4f0b20a916ced Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Tue, 26 Oct 2021 17:06:35 +0100 Subject: [PATCH 004/146] Add token swapping stage to routing v3 (#96) --- tket/src/Mapping/MappingManager.cpp | 15 +++++++- tket/tests/test_MappingManager.cpp | 53 +++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 9cf826ce61..7581e36a98 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -1,6 +1,7 @@ #include "Mapping/MappingManager.hpp" #include "OpType/OpTypeFunctions.hpp" +#include "TokenSwapping/main_entry_functions.hpp" namespace tket { @@ -59,7 +60,19 @@ bool MappingManager::route_circuit( // true => can use held routing method if (rm.get().check_method(mapping_frontier, this->architecture_)) { valid_methods = true; - rm.get().routing_method(mapping_frontier, this->architecture_); + unit_map_t partial_permutation = + rm.get().routing_method(mapping_frontier, this->architecture_); + + if (partial_permutation.size() > 0) { + std::map node_map; + for (const auto& x : partial_permutation) { + node_map.insert({Node(x.first), Node(x.second)}); + } + for (const std::pair& swap : + get_swaps(*this->architecture_, node_map)) { + mapping_frontier->add_swap(swap.first, swap.second); + } + } break; } } diff --git a/tket/tests/test_MappingManager.cpp b/tket/tests/test_MappingManager.cpp index e90fa7e5d6..fac280924d 100644 --- a/tket/tests/test_MappingManager.cpp +++ b/tket/tests/test_MappingManager.cpp @@ -7,6 +7,31 @@ namespace tket { +class TokenSwappingTester : public RoutingMethod { + public: + TokenSwappingTester(){}; + + bool check_method( + const std::shared_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const { + return true; + } + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture 
Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. + * + */ + unit_map_t routing_method( + std::shared_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const { + Node node0("test_node", 0), node1("test_node", 1), node2("test_node", 2); + return {{node0, node1}, {node1, node2}, {node2, node0}}; + } +}; + SCENARIO("Test MappingManager::route_circuit") { Node node0("test_node", 0), node1("test_node", 1), node2("test_node", 2); Architecture arc({{node0, node1}, {node1, node2}}); @@ -33,5 +58,33 @@ SCENARIO("Test MappingManager::route_circuit") { REQUIRE_THROWS_AS( test_mm.route_circuit(circ, test_vrm), MappingManagerError); } + GIVEN("Method that invokes a permutation from token swapping stage.") { + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + std::map rename_map = { + {qubits[0], node0}, {qubits[1], node1}, {qubits[2], node2}}; + circ.rename_units(rename_map); + TokenSwappingTester tst; + std::vector> test_ts_rm = {tst}; + test_mm.route_circuit(circ, test_ts_rm); + + std::vector commands = circ.get_commands(); + REQUIRE(commands.size() == 3); + Command c0 = commands[0]; + unit_vector_t uid_swap_12 = {node1, node2}; + REQUIRE(c0.get_args() == uid_swap_12); + REQUIRE(*c0.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + Command c1 = commands[1]; + unit_vector_t uid_swap_01 = {node0, node1}; + REQUIRE(c1.get_args() == uid_swap_01); + REQUIRE(*c1.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + Command c2 = commands[2]; + unit_vector_t uid_cx_10 = {node1, node0}; + REQUIRE(c2.get_args() == uid_cx_10); + REQUIRE(*c2.get_op_ptr() == *get_op_ptr(OpType::CX)); + } } } // namespace tket From 4c92d8d8bad0c670b128240141295f12a1628564 Mon Sep 17 00:00:00 2001 From: yao-cqc <75305462+yao-cqc@users.noreply.github.com> Date: Tue, 9 Nov 2021 10:11:56 +0000 Subject: [PATCH 005/146] Assert candidate swaps size (#108) * Add assertion that there are at least some swaps to trial * Pseudo code for Yao * Fix routing with measurements issue * Add classically controlled gates to lexiroute test Co-authored-by: sjdilkes --- tket/src/Mapping/LexiRoute.cpp | 10 +++++++-- tket/src/Mapping/MappingFrontier.cpp | 19 +++++++++++++--- tket/tests/test_LexiRoute.cpp | 33 ++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 5 deletions(-) diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index 2e5b70d55c..5c33ec94fb 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -464,6 +464,8 @@ void LexiRoute::solve(unsigned lookahead) { } swap_set_t candidate_swaps = this->get_candidate_swaps(); this->remove_swaps_decreasing(candidate_swaps); + + TKET_ASSERT(candidate_swaps.size() != 0); // Only want to substitute a single swap // check next layer of interacting qubits and remove swaps until only one // lexicographically superior swap is left @@ -488,17 +490,18 @@ void LexiRoute::solve(unsigned lookahead) { // architecture this->set_interacting_uids(true); } - + // find best swap auto it = candidate_swaps.end(); --it; + std::pair chosen_swap = *it; this->mapping_frontier_->set_quantum_boundary(copy); this->set_interacting_uids(); std::pair check = this->check_bridge(chosen_swap, lookahead); - // set for final time, to allow gates to be correctly inserted, but then leave // as is + // insert gates this->mapping_frontier_->set_quantum_boundary(copy); if (!check.first && !check.second) { // update circuit with new swap @@ 
-522,8 +525,10 @@ void LexiRoute::solve(unsigned lookahead) { this->mapping_frontier_->add_bridge(chosen_swap.second, central, target); } } + // TODO: Refactor the following to happen during add_swap and add_bridge // methods + // add ancilla qubits if necessary if (copy.size() < this->mapping_frontier_->quantum_boundary->size()) { // implies ancilla qubit is added // find ancilla qubit, find swap vertex and port by looking at boundary, @@ -557,6 +562,7 @@ void LexiRoute::solve(unsigned lookahead) { } } } + return; } diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 03fd4cf3df..919482efa3 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -162,10 +162,23 @@ void MappingFrontier::advance_frontier_boundary( // TODO: add optional skip function later to skip vertices that don't have // physical requirements boundary_updated = false; - CutFrontier next_cut = this->circuit_.next_cut( + + std::shared_ptr frontier_edges = frontier_convert_vertport_to_edge( - this->circuit_, this->quantum_boundary), - std::make_shared()); + this->circuit_, this->quantum_boundary); + // Add all classical edges that share the same target + unsigned dummy_bit_index = 0; + for (const std::pair& pair : frontier_edges->get()) { + Vertex vert = this->circuit_.target(pair.second); + for (const Edge& e : + this->circuit_.get_in_edges_of_type(vert, EdgeType::Classical)) { + frontier_edges->insert({Bit(dummy_bit_index), e}); + dummy_bit_index++; + } + } + + CutFrontier next_cut = this->circuit_.next_cut( + frontier_edges, std::make_shared()); // For each vertex in a slice, if its physically permitted, update // quantum_boundary with quantum out edges from vertex (i.e. diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index 412d78f32c..a18a2d127d 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -242,6 +242,39 @@ SCENARIO("Test LexiRoute::solve") { LexiRoute lr1(shared_arc, mf); lr1.solve(20); } + + GIVEN( + "Single best solution, with measurements and classically controlled " + "gates.") { + Circuit circ(6, 1); + std::vector qubits = circ.all_qubits(); + circ.add_conditional_gate(OpType::CX, {}, {0, 2}, {0}, 1); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_conditional_gate(OpType::X, {}, {0}, {0}, 1); + circ.add_op(OpType::Measure, {qubits[1], Bit(0)}); + circ.add_op(OpType::CX, {qubits[4], qubits[5]}); + circ.add_op(OpType::Measure, {qubits[3], Bit(0)}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[6]}, {qubits[5], nodes[5]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + + lr.solve(4); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 7); + Command swap_c = commands[1]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } } SCENARIO("Test LexiRouteRoutingMethod") { std::vector nodes = { From 6592cfd3c28d609818c164c2a5271240e9ee5ce5 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 13 Dec 2021 14:50:55 +0000 Subject: [PATCH 006/146] Update Architecture method names --- pytket/binders/routing.cpp | 2 +- tket/src/Architecture/Architecture.cpp | 12 ++-- tket/src/Mapping/LexiRoute.cpp | 31 ++++++----- 
.../src/Mapping/LexicographicalComparison.cpp | 4 +- .../src/Mapping/LexicographicalComparison.hpp | 2 +- tket/src/Mapping/MappingFrontier.hpp | 2 +- tket/src/Mapping/MappingManager.cpp | 4 +- tket/src/Mapping/MappingManager.hpp | 2 +- tket/src/Mapping/RoutingMethodCircuit.cpp | 2 +- .../src/TokenSwapping/ArchitectureMapping.cpp | 4 +- .../src/TokenSwapping/ArchitectureMapping.hpp | 4 +- .../DistancesFromArchitecture.cpp | 2 +- .../NeighboursFromArchitecture.cpp | 2 +- .../TokenSwapping/main_entry_functions.hpp | 2 +- .../TestUtils/FullTsaTesting.hpp | 2 +- .../TestUtils/PartialTsaTesting.hpp | 2 +- .../TestUtils/ProblemGeneration.cpp | 2 +- .../TestUtils/ProblemGeneration.hpp | 2 +- tket/tests/TokenSwapping/test_FullTsa.cpp | 31 +---------- .../test_RiverFlowPathFinder.cpp | 8 --- .../TokenSwapping/test_VariousPartialTsa.cpp | 55 ------------------- .../test_main_entry_functions.cpp | 4 +- tket/tests/test_RoutingMethod.cpp | 8 +-- 23 files changed, 49 insertions(+), 140 deletions(-) diff --git a/pytket/binders/routing.cpp b/pytket/binders/routing.cpp index 7492bf764b..281f6a7fcf 100644 --- a/pytket/binders/routing.cpp +++ b/pytket/binders/routing.cpp @@ -127,7 +127,7 @@ PYBIND11_MODULE(routing, m) { "returns distance between them", py::arg("node_0"), py::arg("node_1")) .def( - "get_adjacent_nodes", &Architecture::get_neighbour_uids, + "get_adjacent_nodes", &Architecture::get_neighbour_nodes, "given a node, returns adjacent nodes in Architecture.", py::arg("node")) .def_property_readonly( diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index 2ebd612571..bf6507f19f 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -34,17 +34,17 @@ bool Architecture::valid_operation( return true; } else if (uids.size() == 2) { if (this->node_exists(uids[0]) && this->node_exists(uids[1]) && - (this->connection_exists(uids[0], uids[1]) || - this->connection_exists(uids[1], uids[0]))) { + (this->edge_exists(uids[0], uids[1]) || + this->edge_exists(uids[1], uids[0]))) { return true; } } else if (uids.size() == 3) { bool con_0_exists = - (this->connection_exists(uids[0], uids[1]) || - this->connection_exists(uids[1], uids[0])); + (this->edge_exists(uids[0], uids[1]) || + this->edge_exists(uids[1], uids[0])); bool con_1_exists = - (this->connection_exists(uids[2], uids[1]) || - this->connection_exists(uids[1], uids[2])); + (this->edge_exists(uids[2], uids[1]) || + this->edge_exists(uids[1], uids[2])); if (this->node_exists(uids[0]) && this->node_exists(uids[1]) && this->node_exists(uids[2]) && con_0_exists && con_1_exists) { return true; diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index 5c33ec94fb..c88ba22959 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -14,7 +14,7 @@ LexiRoute::LexiRoute( this->labelling_.insert({qb, qb}); Node n(qb); // store which Node have been asigned to Circuit already - if (this->architecture_->uid_exists(n)) { + if (this->architecture_->node_exists(n)) { this->assigned_nodes_.insert(n); } } @@ -88,7 +88,7 @@ bool LexiRoute::assign_at_distance( const UnitID& assignee, const Node& root, unsigned distances) { node_set_t valid_nodes; for (const Node& neighbour : - this->architecture_->uids_at_distance(root, distances)) { + this->architecture_->nodes_at_distance(root, distances)) { if (this->assigned_nodes_.find(neighbour) == this->assigned_nodes_.end() || this->mapping_frontier_->ancilla_nodes_.find(neighbour) != 
this->mapping_frontier_->ancilla_nodes_.end()) { @@ -147,9 +147,9 @@ bool LexiRoute::update_labelling() { bool relabelled = false; for (const auto& pair : this->interacting_uids_) { bool uid_0_exist = - this->architecture_->uid_exists(Node(this->labelling_[pair.first])); + this->architecture_->node_exists(Node(this->labelling_[pair.first])); bool uid_1_exist = - this->architecture_->uid_exists(Node(this->labelling_[pair.second])); + this->architecture_->node_exists(Node(this->labelling_[pair.second])); if (!uid_0_exist || !uid_1_exist) { relabelled = true; } @@ -160,13 +160,14 @@ bool LexiRoute::update_labelling() { if (this->assigned_nodes_.size() == 0) { // find nodes with best averaged distance to other nodes // place it there... - std::set max_degree_uids = this->architecture_->max_degree_uids(); - auto it = max_degree_uids.begin(); + std::set max_degree_nodes = + this->architecture_->max_degree_nodes(); + auto it = max_degree_nodes.begin(); lexicographical_distances_t winning_distances = this->architecture_->get_distances(*it); Node preserved_node = Node(*it); ++it; - for (; it != max_degree_uids.end(); ++it) { + for (; it != max_degree_nodes.end(); ++it) { lexicographical_distances_t comparison_distances = this->architecture_->get_distances(*it); if (comparison_distances < winning_distances) { @@ -254,8 +255,8 @@ void LexiRoute::set_interacting_uids(bool assigned_only) { // we can assume from how we iterate through pairs that each qubit // will only be found in one match if (!assigned_only || - (this->architecture_->uid_exists(Node(it->first)) && - this->architecture_->uid_exists(Node(jt->first)))) { + (this->architecture_->node_exists(Node(it->first)) && + this->architecture_->node_exists(Node(jt->first)))) { interacting_uids_.insert({it->first, jt->first}); interacting_uids_.insert({jt->first, it->first}); } @@ -272,7 +273,7 @@ swap_set_t LexiRoute::get_candidate_swaps() { for (const auto& interaction : this->interacting_uids_) { Node assigned_first = Node(this->labelling_[interaction.first]); std::vector adjacent_uids_0 = - this->architecture_->uids_at_distance(assigned_first, 1); + this->architecture_->nodes_at_distance(assigned_first, 1); if (adjacent_uids_0.size() == 0) { throw LexiRouteError( assigned_first.repr() + " has no adjacent Node in Architecture."); @@ -285,7 +286,7 @@ swap_set_t LexiRoute::get_candidate_swaps() { } Node assigned_second = Node(this->labelling_[interaction.second]); std::vector adjacent_uids_1 = - this->architecture_->uids_at_distance(assigned_second, 1); + this->architecture_->nodes_at_distance(assigned_second, 1); if (adjacent_uids_1.size() == 0) { throw LexiRouteError( assigned_first.repr() + " has no adjacent Node in Architecture."); @@ -393,10 +394,10 @@ std::pair LexiRoute::check_bridge( const std::pair LexiRoute::pair_distances( const Node& p0_first, const Node& p0_second, const Node& p1_first, const Node& p1_second) const { - if (!this->architecture_->uid_exists(p0_first) || - !this->architecture_->uid_exists(p0_second) || - !this->architecture_->uid_exists(p1_first) || - !this->architecture_->uid_exists(p1_second)) { + if (!this->architecture_->node_exists(p0_first) || + !this->architecture_->node_exists(p0_second) || + !this->architecture_->node_exists(p1_first) || + !this->architecture_->node_exists(p1_second)) { throw LexiRouteError( "Node passed to LexiRoute::pair_distances not in architecture."); } diff --git a/tket/src/Mapping/LexicographicalComparison.cpp b/tket/src/Mapping/LexicographicalComparison.cpp index 3d51a0fad8..a5e5ea69b4 
100644 --- a/tket/src/Mapping/LexicographicalComparison.cpp +++ b/tket/src/Mapping/LexicographicalComparison.cpp @@ -15,8 +15,8 @@ LexicographicalComparison::LexicographicalComparison( lexicographical_distances_t distance_vector(diameter, 0); for (const auto& interaction : this->interacting_nodes_) { // If Node not in architecture, don't add - if (!this->architecture_->uid_exists(interaction.first) || - !this->architecture_->uid_exists(interaction.second)) { + if (!this->architecture_->node_exists(interaction.first) || + !this->architecture_->node_exists(interaction.second)) { throw LexicographicalComparisonError( "Constructor passed some interacting node not in architecture."); } diff --git a/tket/src/Mapping/LexicographicalComparison.hpp b/tket/src/Mapping/LexicographicalComparison.hpp index c6bef60bc3..0f8ccd0461 100644 --- a/tket/src/Mapping/LexicographicalComparison.hpp +++ b/tket/src/Mapping/LexicographicalComparison.hpp @@ -1,7 +1,7 @@ #ifndef _TKET_LexicographicalComparison_H_ #define _TKET_LexicographicalComparison_H_ -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/UnitID.hpp" diff --git a/tket/src/Mapping/MappingFrontier.hpp b/tket/src/Mapping/MappingFrontier.hpp index d17eb9988b..eb64afb46a 100644 --- a/tket/src/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/MappingFrontier.hpp @@ -1,7 +1,7 @@ #ifndef _TKET_MappingFrontier_H_ #define _TKET_MappingFrontier_H_ -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "Circuit/Circuit.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/UnitID.hpp" diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 7581e36a98..4c70f9d3d6 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -16,11 +16,11 @@ bool MappingManager::route_circuit( // with more logical qubits than an Architecture has // physical qubits physically permitted - if (circuit.n_qubits() > this->architecture_->n_uids()) { + if (circuit.n_qubits() > this->architecture_->n_nodes()) { std::string error_string = "Circuit has" + std::to_string(circuit.n_qubits()) + " logical qubits. Architecture has " + - std::to_string(this->architecture_->n_uids()) + + std::to_string(this->architecture_->n_nodes()) + " physical qubits. 
Circuit to be routed can not have more " "qubits than the Architecture."; throw MappingManagerError(error_string); diff --git a/tket/src/Mapping/MappingManager.hpp b/tket/src/Mapping/MappingManager.hpp index 2db966512e..5715ac2d2b 100644 --- a/tket/src/Mapping/MappingManager.hpp +++ b/tket/src/Mapping/MappingManager.hpp @@ -1,7 +1,7 @@ #ifndef _TKET_MappingManager_H_ #define _TKET_MappingManager_H_ -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "Circuit/Circuit.hpp" #include "Mapping/RoutingMethod.hpp" #include "Utils/UnitID.hpp" diff --git a/tket/src/Mapping/RoutingMethodCircuit.cpp b/tket/src/Mapping/RoutingMethodCircuit.cpp index 9928b97ab8..1366c9e1bd 100644 --- a/tket/src/Mapping/RoutingMethodCircuit.cpp +++ b/tket/src/Mapping/RoutingMethodCircuit.cpp @@ -50,7 +50,7 @@ unit_map_t RoutingMethodCircuit::routing_method( unit_map_t swap_permutation; for (const auto& pair : new_labelling) { if (pair.first != pair.second && - architecture->uid_exists(Node(pair.first))) { + architecture->node_exists(Node(pair.first))) { swap_permutation.insert(pair); } } diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 5d35024677..0379335e5f 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -8,7 +8,7 @@ namespace tsa_internal { ArchitectureMapping::ArchitectureMapping(const Architecture& arch) : m_arch(arch) { - const auto uids = arch.get_all_uids(); + const auto uids = arch.get_all_nodes(); m_vertex_to_node_mapping.reserve(uids.size()); for (const UnitID& uid : uids) { m_vertex_to_node_mapping.emplace_back(Node(uid)); @@ -60,7 +60,7 @@ const Architecture& ArchitectureMapping::get_architecture() const { std::vector ArchitectureMapping::get_edges() const { std::vector edges; - for (auto [node1, node2] : m_arch.get_connections_vec()) { + for (auto [node1, node2] : m_arch.get_all_edges_vec()) { edges.emplace_back(get_swap(get_vertex(node1), get_vertex(node2))); } return edges; diff --git a/tket/src/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwapping/ArchitectureMapping.hpp index 3b56f3e45a..d3710fb09c 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.hpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.hpp @@ -1,7 +1,7 @@ #ifndef _TKET_TokenSwapping_ArchitectureMapping_H_ #define _TKET_TokenSwapping_ArchitectureMapping_H_ -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "TSAUtils/SwapFunctions.hpp" namespace tket { @@ -9,7 +9,7 @@ namespace tsa_internal { /** For mapping between nodes in an architecture and size_t vertex numbers. * The vertex numbers are merely the indices of each Node - * within the vector returned by the get_all_uids() function. + * within the vector returned by the get_all_nodes() function. * * For now, we don't want to use Node objects as (1) this would make * TokenSwapping dependent on other parts of Tket and hence less modular, diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index b5db7c9f84..c0b4f597e4 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -62,7 +62,7 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { // other bizarre error causes distance zero to be returned. 
if (distance_entry == 0) { std::stringstream ss; - ss << "DistancesFromArchitecture: architecture has " << arch.n_uids() + ss << "DistancesFromArchitecture: architecture has " << arch.n_nodes() << " vertices, " << arch.n_connections() << " edges; returned diameter " << arch.get_diameter() << ", but d(" << vertex1 << "," << vertex2 diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index d52b33e586..914171e387 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -33,7 +33,7 @@ const std::vector& NeighboursFromArchitecture::operator()( const auto& source_node = m_arch_mapping.get_node(vertex); const auto neighbour_nodes = - m_arch_mapping.get_architecture().get_neighbour_uids(source_node); + m_arch_mapping.get_architecture().get_neighbour_nodes(source_node); neighbours.reserve(neighbour_nodes.size()); diff --git a/tket/src/TokenSwapping/main_entry_functions.hpp b/tket/src/TokenSwapping/main_entry_functions.hpp index 2694202306..2dbfcd57fb 100644 --- a/tket/src/TokenSwapping/main_entry_functions.hpp +++ b/tket/src/TokenSwapping/main_entry_functions.hpp @@ -6,7 +6,7 @@ #include #include -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "Circuit/Circuit.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index 29c17474fb..90a27adc8b 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -1,7 +1,7 @@ #ifndef _TKET_TESTS_TokenSwapping_TestUtils_FullTsaTesting_H_ #define _TKET_TESTS_TokenSwapping_TestUtils_FullTsaTesting_H_ -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index f8605aa209..39c467693a 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -1,7 +1,7 @@ #ifndef _TKET_TESTS_TokenSwapping_TestUtils_PartialTsaTesting_H_ #define _TKET_TESTS_TokenSwapping_TestUtils_PartialTsaTesting_H_ -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" #include "TokenSwapping/RNG.hpp" diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp index 933ffb1d22..c0dd2507df 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -50,7 +50,7 @@ vector ProblemGenerator00::get_problems( // leading to different tests. 
const std::string& expected_summary) const { REQUIRE(step > 0); - const unsigned num_vertices = arch.n_uids(); + const unsigned num_vertices = arch.n_nodes(); TSProblemParameters00 params; vector vertex_mappings; diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp index 917e664e79..8d1dbd7db8 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -1,7 +1,7 @@ #ifndef _TKET_TESTS_TokenSwapping_TestUtils_ProblemGeneration_H_ #define _TKET_TESTS_TokenSwapping_TestUtils_ProblemGeneration_H_ -#include "Architecture/Architectures.hpp" +#include "Architecture/Architecture.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" diff --git a/tket/tests/TokenSwapping/test_FullTsa.cpp b/tket/tests/TokenSwapping/test_FullTsa.cpp index 80b0a19b48..c3b7907265 100644 --- a/tket/tests/TokenSwapping/test_FullTsa.cpp +++ b/tket/tests/TokenSwapping/test_FullTsa.cpp @@ -139,35 +139,6 @@ SCENARIO("Full TSA: Rings") { "[Winners: joint: 231 252 394 397 400 394 undisputed: 0 0 0 0 3 0]"); } -SCENARIO("Full TSA: fully connected") { - const vector problem_messages{ - "[K3: 51582: v3 i1 f100 s1: 100 problems; 135 tokens]", - "[K5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", - "[K10: 51634: v10 i1 f100 s1: 100 problems; 469 tokens]", - "[K20: 51498: v20 i1 f100 s1: 100 problems; 974 tokens]"}; - - const vector num_vertices{3, 5, 10, 20}; - FullTester tester; - tester.test_name = "FullyConn"; - std::string arch_name; - - for (size_t index = 0; index < problem_messages.size(); ++index) { - const FullyConnected arch(num_vertices[index]); - arch_name = "K" + std::to_string(num_vertices[index]); - tester.add_problems(arch, arch_name, problem_messages[index]); - } - CHECK( - tester.results.str() == - "[FullyConn:HybridTSA_00: 400 probs; 1802 toks; 867 tot.lb]\n" - "[Total swaps: 1435 1435 1435 1435 1435 1435]\n" - "[Winners: joint: 400 400 400 400 400 400 undisputed: 0 0 0 0 0 0]"); - - CHECK( - tester.trivial_results.str() == - "[FullyConn:Trivial: 400 probs; 1802 toks; 867 tot.lb]\n" - "[Total swaps: 1435 1435 1435 1435 1435 1435]\n" - "[Winners: joint: 400 400 400 400 400 400 undisputed: 0 0 0 0 0 0]"); -} SCENARIO("Full TSA: Square Grids") { const vector> grid_parameters = { @@ -220,7 +191,7 @@ SCENARIO("Full TSA: Random trees") { const auto edges = tree_generator.get_tree_edges(tester.rng); const Architecture arch(edges); - REQUIRE(arch.n_uids() == edges.size() + 1); + REQUIRE(arch.n_nodes() == edges.size() + 1); arch_name = "Tree" + std::to_string(index); tester.add_problems(arch, arch_name, problem_messages[index]); } diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 412d4d776e..794ebfee7c 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -218,14 +218,6 @@ static void test(TestResult& result, const Architecture& arch, RNG& rng) { arch_mapping.number_of_vertices(), rng); } -SCENARIO("Path generation for complete graph") { - RNG rng; - TestResult result; - const FullyConnected arch(5); - test(result, arch, rng); - REQUIRE(result.str() == "[ Number of path calls: 250 Extra paths: 0 ]"); -} - SCENARIO("Path generation for ring graph") { RNG rng; TestResult result; diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp 
b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp index c9434d37d0..9b88c48519 100644 --- a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -169,61 +169,6 @@ SCENARIO("Partial TSA: Rings") { } } -SCENARIO("Partial TSA: Fully connected") { - const vector problem_messages{ - "[K5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", - "[K9: 51665: v9 i1 f100 s1: 100 problems; 416 tokens]"}; - - Tester tester; - tester.messages_full_trivial_tsa = { - "[TSA=Trivial FULL PF=RiverFlow\n" - "224 tokens; 172 total L; 149 swaps.\n" - "L-decr %: min 100, max 100, av 100.\n" - "Power %: min 50, max 100, av 64]", - - "[TSA=Trivial FULL PF=RiverFlow\n" - "416 tokens; 378 total L; 342 swaps.\n" - "L-decr %: min 100, max 100, av 100.\n" - "Power %: min 50, max 100, av 56]", - }; - - tester.messages_partial_trivial_tsa = { - "[TSA=Trivial NONZERO PF=RiverFlow\n" - "224 tokens; 172 total L; 84 swaps.\n" - "L-decr %: min 25, max 100, av 74.\n" - "Power %: min 50, max 100, av 63]", - - "[TSA=Trivial NONZERO PF=RiverFlow\n" - "416 tokens; 378 total L; 98 swaps.\n" - "L-decr %: min 12, max 100, av 46.\n" - "Power %: min 50, max 100, av 58]"}; - - tester.messages_cycles_tsa_0 = { - "[TSA=Cycles PF=RiverFlow\n" - "224 tokens; 172 total L; 149 swaps.\n" - "L-decr %: min 100, max 100, av 100.\n" - "Power %: min 50, max 100, av 64]", - - "[TSA=Cycles PF=RiverFlow\n" - "416 tokens; 378 total L; 342 swaps.\n" - "L-decr %: min 100, max 100, av 100.\n" - "Power %: min 50, max 100, av 56]"}; - - std::string arch_name; - const ProblemGenerator00 generator; - - for (size_t index = 0; index < problem_messages.size(); ++index) { - auto num_vertices = 4 * index + 5; - const FullyConnected arch(num_vertices); - arch_name = "K" + std::to_string(num_vertices); - tester.rng.set_seed(); - const auto problems = generator.get_problems( - arch_name, arch, tester.rng, problem_messages[index]); - - tester.run_test(arch, problems, index); - } -} - SCENARIO("Partial TSA: Square grid") { const vector> grid_parameters = { {2, 3, 3}, {5, 5, 3}}; diff --git a/tket/tests/TokenSwapping/test_main_entry_functions.cpp b/tket/tests/TokenSwapping/test_main_entry_functions.cpp index 9711b2afcd..30d381bae6 100644 --- a/tket/tests/TokenSwapping/test_main_entry_functions.cpp +++ b/tket/tests/TokenSwapping/test_main_entry_functions.cpp @@ -19,8 +19,8 @@ SCENARIO("main entry function for TSA") { std::stringstream problem_ss; const SquareGrid arch(3, 4, 2); - const auto nodes = arch.get_all_uids_vec(); - const auto edges = arch.get_connections_vec(); + const auto nodes = arch.get_all_nodes_vec(); + const auto edges = arch.get_all_edges_vec(); problem_ss << nodes.size() << " nodes; " << edges.size() << " edges."; // The value is the set of all neighbouring nodes. 
diff --git a/tket/tests/test_RoutingMethod.cpp b/tket/tests/test_RoutingMethod.cpp index 8e1d7bc12f..3cc51cbb0a 100644 --- a/tket/tests/test_RoutingMethod.cpp +++ b/tket/tests/test_RoutingMethod.cpp @@ -24,7 +24,7 @@ SCENARIO("Test RoutingMethod default methods.") { // routing Their design is to minimally test the required features of the // methods, not to actually succesfully route a circuit bool test_check_method(const Circuit& c, const ArchitecturePtr& a) { - if (c.n_qubits() > 2 && a->n_uids() > 2) { + if (c.n_qubits() > 2 && a->n_nodes() > 2) { return true; } else { return false; @@ -35,7 +35,7 @@ std::tuple test_routing_method_mf_swap_perm( const Circuit& c, const ArchitecturePtr& a) { Circuit copy(c); std::vector qs = copy.all_qubits(); - std::vector ns = a->get_all_uids_vec(); + std::vector ns = a->get_all_nodes_vec(); // enforce in tests that ns >= qs, this is testing purposes only so fine... unit_map_t rename_map, final_map; for (unsigned i = 0; i < qs.size(); i++) { @@ -54,7 +54,7 @@ std::tuple test_routing_method_mf_swap_no_perm( const Circuit& c, const ArchitecturePtr& a) { Circuit copy(c); std::vector qs = copy.all_qubits(); - std::vector ns = a->get_all_uids_vec(); + std::vector ns = a->get_all_nodes_vec(); // enforce in tests that ns >= qs, this is testing purposes only so fine... unit_map_t rename_map, final_map; for (unsigned i = 0; i < qs.size(); i++) { @@ -79,7 +79,7 @@ std::tuple test_routing_method_circuit_no_perm( copy.add_op(OpType::CX, {1, 0}); std::vector qs = copy.all_qubits(); - std::vector ns = a->get_all_uids_vec(); + std::vector ns = a->get_all_nodes_vec(); // enforce in tests that ns >= qs, this is testing purposes only so fine... unit_map_t rename_map, final_map; for (unsigned i = 0; i < qs.size(); i++) { From b3cda7311e9709d7d014f7344acc7924ee0a8b19 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 13 Dec 2021 14:56:50 +0000 Subject: [PATCH 007/146] get_all_nodes -> nodes --- tket/src/TokenSwapping/ArchitectureMapping.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 0379335e5f..ca55f96f03 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -8,7 +8,7 @@ namespace tsa_internal { ArchitectureMapping::ArchitectureMapping(const Architecture& arch) : m_arch(arch) { - const auto uids = arch.get_all_nodes(); + const auto uids = arch.nodes(); m_vertex_to_node_mapping.reserve(uids.size()); for (const UnitID& uid : uids) { m_vertex_to_node_mapping.emplace_back(Node(uid)); From 24be612a45aed5ea0cdd460d7bef97a83dcced75 Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Fri, 7 Jan 2022 14:22:05 +0000 Subject: [PATCH 008/146] Update Compilation Passes to use RoutingV3 (#115) * Add token swapping stage, add test * Update compilation passes to use new routing * Add json serialization * Continue adding JSON serialisation for routing_config * Improve Json definitions * Update JSON Serialization and use of Barrier * Change from reference_wrapper to shared_ptr * Add JSON_DECL for std::vector * format routing_test * Fix up tests and binders for python * Uncoment measurement tests * rename method to merge_ancilla * debug proptest * Make add_qubit add qubit to unit_bimaps_ if not nullptr * Architectures -> Architecture * Install boost on MacOS. 
* comments to debug * update proptest to support ancillas properly * remove couts * format * Make Unitary dimensions match * add tket assert for comparison * Update test to check value * add_qubit -> add_ancilla * Remove kwargs formatting from argument * Rename Architecture Methods * rename architecture methods * Allow architecture mapping to take original edges, to calculate Node to size_t mapping * add get_square_grid_edges, to allow fixed tests independent of SquareGrid * use ArchitectureMapping and edges in most tests, instead of Architecture * trivial typos, comments, cmake update * add copyright notices, pragma once, remove semicolon typos * update binders for inheritance and docs * format * Remove NodeGraph * update formatting * Update CMakeLists and Setup.py * Use explicit shared_ptr * Refactor Routing module binder Make "FullMappingPass" use a kwargs based argument to get round faulty docs type definitions. * remove trailing whitespace * update clang formatting * reformat file * update orientation of BRIDGE gates * Update conf docs mapping, remove kwargs full mapping pass Co-authored-by: Alec Edgington Co-authored-by: Zen Harper --- pytket/CMakeLists.txt | 4 +- pytket/binders/architecture.cpp | 176 +++++++++ pytket/binders/mapping.cpp | 10 +- pytket/binders/passes.cpp | 56 +-- pytket/binders/placement.cpp | 219 +++++++++++ pytket/binders/routing.cpp | 339 ------------------ pytket/docs/changelog.rst | 1 - pytket/docs/conf.py | 3 +- pytket/pytket/__init__.py | 2 + pytket/pytket/architecture/__init__.py | 14 + pytket/pytket/backends/backendinfo.py | 2 +- pytket/pytket/placement/__init__.py | 15 + pytket/setup.py | 4 +- pytket/tests/backend_test.py | 3 +- pytket/tests/backendinfo_test.py | 2 +- pytket/tests/mapping_test.py | 2 +- pytket/tests/mitigation_test.py | 3 +- pytket/tests/predicates_test.py | 14 +- pytket/tests/routing_test.py | 37 +- pytket/tests/strategies.py | 2 +- schemas/compiler_pass_v1.json | 41 +-- tket/src/CMakeLists.txt | 1 + tket/src/Mapping/LexiRoute.cpp | 173 +++------ tket/src/Mapping/LexiRoute.hpp | 28 +- tket/src/Mapping/MappingFrontier.cpp | 111 +++++- tket/src/Mapping/MappingFrontier.hpp | 27 +- tket/src/Mapping/MappingManager.cpp | 7 +- tket/src/Mapping/MappingManager.hpp | 3 +- tket/src/Mapping/RoutingMethod.hpp | 8 + tket/src/Mapping/RoutingMethodCircuit.cpp | 1 + tket/src/Mapping/RoutingMethodCircuit.hpp | 3 + tket/src/Mapping/RoutingMethodJson.cpp | 36 ++ tket/src/Mapping/RoutingMethodJson.hpp | 24 ++ tket/src/Predicates/CompilerPass.cpp | 9 +- tket/src/Predicates/PassGenerators.cpp | 27 +- tket/src/Predicates/PassGenerators.hpp | 14 +- .../src/TokenSwapping/ArchitectureMapping.cpp | 60 ++++ .../src/TokenSwapping/ArchitectureMapping.hpp | 37 +- tket/src/TokenSwapping/BestFullTsa.cpp | 14 + tket/src/TokenSwapping/BestFullTsa.hpp | 19 +- .../TokenSwapping/CyclesCandidateManager.cpp | 14 + .../TokenSwapping/CyclesCandidateManager.hpp | 18 +- .../src/TokenSwapping/CyclesGrowthManager.cpp | 14 + .../src/TokenSwapping/CyclesGrowthManager.hpp | 18 +- tket/src/TokenSwapping/CyclesPartialTsa.cpp | 14 + tket/src/TokenSwapping/CyclesPartialTsa.hpp | 18 +- .../TokenSwapping/CyclicShiftCostEstimate.cpp | 15 +- .../TokenSwapping/CyclicShiftCostEstimate.hpp | 18 +- .../DistancesFromArchitecture.cpp | 14 + .../DistancesFromArchitecture.hpp | 18 +- tket/src/TokenSwapping/DistancesInterface.cpp | 15 +- tket/src/TokenSwapping/DistancesInterface.hpp | 18 +- .../src/TokenSwapping/DynamicTokenTracker.cpp | 16 +- .../src/TokenSwapping/DynamicTokenTracker.hpp | 18 +- 
tket/src/TokenSwapping/HybridTsa00.cpp | 15 +- tket/src/TokenSwapping/HybridTsa00.hpp | 18 +- .../NeighboursFromArchitecture.cpp | 16 +- .../NeighboursFromArchitecture.hpp | 18 +- .../src/TokenSwapping/NeighboursInterface.cpp | 14 + .../src/TokenSwapping/NeighboursInterface.hpp | 18 +- .../src/TokenSwapping/PartialTsaInterface.cpp | 14 + .../src/TokenSwapping/PartialTsaInterface.hpp | 18 +- .../src/TokenSwapping/PathFinderInterface.cpp | 14 + .../src/TokenSwapping/PathFinderInterface.hpp | 18 +- tket/src/TokenSwapping/RNG.hpp | 18 +- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 15 +- .../src/TokenSwapping/RiverFlowPathFinder.hpp | 18 +- tket/src/TokenSwapping/SwapListOptimiser.cpp | 14 + tket/src/TokenSwapping/SwapListOptimiser.hpp | 18 +- .../TokenSwapping/TSAUtils/DebugFunctions.cpp | 14 + .../TokenSwapping/TSAUtils/DebugFunctions.hpp | 18 +- .../TSAUtils/DistanceFunctions.cpp | 16 +- .../TSAUtils/DistanceFunctions.hpp | 18 +- .../TSAUtils/GeneralFunctions.cpp | 16 +- .../TSAUtils/GeneralFunctions.hpp | 18 +- .../TokenSwapping/TSAUtils/SwapFunctions.cpp | 16 +- .../TokenSwapping/TSAUtils/SwapFunctions.hpp | 18 +- .../TSAUtils/VertexMappingFunctions.cpp | 16 +- .../TSAUtils/VertexMappingFunctions.hpp | 18 +- .../TSAUtils/VertexSwapResult.cpp | 16 +- .../TSAUtils/VertexSwapResult.hpp | 18 +- .../TableLookup/CanonicalRelabelling.cpp | 14 + .../TableLookup/CanonicalRelabelling.hpp | 18 +- .../TableLookup/ExactMappingLookup.cpp | 14 + .../TableLookup/ExactMappingLookup.hpp | 17 +- .../TableLookup/FilteredSwapSequences.cpp | 14 + .../TableLookup/FilteredSwapSequences.hpp | 18 +- .../TableLookup/PartialMappingLookup.cpp | 15 +- .../TableLookup/PartialMappingLookup.hpp | 18 +- .../TableLookup/SwapConversion.cpp | 15 +- .../TableLookup/SwapConversion.hpp | 18 +- .../TableLookup/SwapListSegmentOptimiser.cpp | 15 +- .../TableLookup/SwapListSegmentOptimiser.hpp | 18 +- .../TableLookup/SwapListTableOptimiser.cpp | 16 +- .../TableLookup/SwapListTableOptimiser.hpp | 18 +- .../TableLookup/SwapSequenceTable.cpp | 14 + .../TableLookup/SwapSequenceTable.hpp | 18 +- .../TableLookup/VertexMapResizing.cpp | 15 +- .../TableLookup/VertexMapResizing.hpp | 18 +- tket/src/TokenSwapping/TrivialTSA.cpp | 15 +- tket/src/TokenSwapping/TrivialTSA.hpp | 18 +- tket/src/TokenSwapping/VectorListHybrid.hpp | 18 +- .../VectorListHybridSkeleton.cpp | 14 + .../VectorListHybridSkeleton.hpp | 18 +- .../TokenSwapping/main_entry_functions.cpp | 14 + .../TokenSwapping/main_entry_functions.hpp | 18 +- .../Data/FixedCompleteSolutions.cpp | 14 + .../Data/FixedCompleteSolutions.hpp | 19 +- .../TokenSwapping/Data/FixedSwapSequences.cpp | 14 + .../TokenSwapping/Data/FixedSwapSequences.hpp | 18 +- .../TableLookup/NeighboursFromEdges.cpp | 16 +- .../TableLookup/NeighboursFromEdges.hpp | 18 +- .../TableLookup/PermutationTestUtils.cpp | 14 + .../TableLookup/PermutationTestUtils.hpp | 18 +- .../SwapSequenceReductionTester.cpp | 15 +- .../SwapSequenceReductionTester.hpp | 18 +- .../TableLookup/test_CanonicalRelabelling.cpp | 15 +- .../TableLookup/test_ExactMappingLookup.cpp | 15 +- .../test_FilteredSwapSequences.cpp | 15 +- .../test_SwapSequenceReductions.cpp | 15 +- .../TableLookup/test_SwapSequenceTable.cpp | 15 +- .../ArchitectureEdgesReimplementation.cpp | 62 ++++ .../ArchitectureEdgesReimplementation.hpp | 36 ++ .../TokenSwapping/TestUtils/BestTsaTester.cpp | 17 +- .../TokenSwapping/TestUtils/BestTsaTester.hpp | 18 +- .../TestUtils/DecodedProblemData.cpp | 15 +- .../TestUtils/DecodedProblemData.hpp | 18 +- 
.../TestUtils/FullTsaTesting.cpp | 21 +- .../TestUtils/FullTsaTesting.hpp | 25 +- .../TestUtils/PartialTsaTesting.cpp | 26 +- .../TestUtils/PartialTsaTesting.hpp | 28 +- .../TestUtils/ProblemGeneration.cpp | 23 +- .../TestUtils/ProblemGeneration.hpp | 20 +- .../TestUtils/TestStatsStructs.cpp | 16 +- .../TestUtils/TestStatsStructs.hpp | 16 +- .../test_ArchitectureMappingEndToEnd.cpp | 16 +- .../test_BestTsaFixedSwapSequences.cpp | 15 +- .../test_DistancesFromArchitecture.cpp | 17 +- tket/tests/TokenSwapping/test_FullTsa.cpp | 78 +++- .../test_RiverFlowPathFinder.cpp | 30 +- tket/tests/TokenSwapping/test_SwapList.cpp | 14 + .../TokenSwapping/test_SwapListOptimiser.cpp | 15 +- .../TokenSwapping/test_VariousPartialTsa.cpp | 48 ++- .../TokenSwapping/test_VectorListHybrid.cpp | 15 +- .../test_VectorListHybridSkeleton.cpp | 15 +- .../test_main_entry_functions.cpp | 28 +- tket/tests/test_CompilerPass.cpp | 22 +- tket/tests/test_LexiRoute.cpp | 12 +- tket/tests/test_MappingFrontier.cpp | 2 +- tket/tests/test_MappingManager.cpp | 7 +- tket/tests/test_Routing.cpp | 5 +- tket/tests/test_json.cpp | 17 +- tket/tests/tkettestsfiles.cmake | 1 + 153 files changed, 2670 insertions(+), 902 deletions(-) create mode 100644 pytket/binders/architecture.cpp create mode 100644 pytket/binders/placement.cpp create mode 100644 pytket/pytket/architecture/__init__.py create mode 100644 pytket/pytket/placement/__init__.py create mode 100644 tket/src/Mapping/RoutingMethodJson.cpp create mode 100644 tket/src/Mapping/RoutingMethodJson.hpp create mode 100644 tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp create mode 100644 tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp diff --git a/pytket/CMakeLists.txt b/pytket/CMakeLists.txt index 6750308268..7b9d0ae160 100644 --- a/pytket/CMakeLists.txt +++ b/pytket/CMakeLists.txt @@ -54,6 +54,7 @@ build_module(circuit binders/circuit/Circuit/add_op.cpp binders/circuit/Circuit/add_classical_op.cpp) build_module(routing binders/routing.cpp) +build_module(mapping binders/mapping.cpp) build_module(transform binders/transform.cpp) build_module(predicates binders/predicates.cpp) build_module(passes binders/passes.cpp) @@ -63,7 +64,8 @@ build_module(pauli binders/pauli.cpp) build_module(logging binders/logging.cpp) build_module(utils_serialization binders/utils_serialization.cpp) build_module(tailoring binders/tailoring.cpp) -build_module(mapping binders/mapping.cpp) build_module(zx binders/zx/diagram.cpp binders/zx/rewrite.cpp) +build_module(architecture binders/architecture.cpp) +build_module(placement binders/placement.cpp) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp new file mode 100644 index 0000000000..8a2ff121c5 --- /dev/null +++ b/pytket/binders/architecture.cpp @@ -0,0 +1,176 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "Architecture/Architecture.hpp" + +#include +#include +#include +#include + +#include "Utils/Json.hpp" +#include "binder_json.hpp" + +namespace py = pybind11; +using json = nlohmann::json; + +namespace tket { + +PYBIND11_MODULE(architecture, m) { + py::class_( + m, "Architecture", + "Class describing the connectivity of qubits on a general device.") + .def( + py::init([](const std::vector> + &connections) { return Architecture(connections); }), + "The constructor for an architecture with connectivity " + "between qubits.\n\n:param connections: A list of pairs " + "representing qubit indices that can perform two-qubit " + "operations", + py::arg("connections")) + .def( + py::init> &>(), + "The constructor for an architecture with connectivity " + "between qubits.\n\n:param connections: A list of pairs " + "representing Nodes that can perform two-qubit operations", + py::arg("connections")) + .def( + "__repr__", + [](const Architecture &arc) { + return ""; + }) + .def( + "get_distance", &Architecture::get_distance, + "given two nodes in Architecture, " + "returns distance between them", + py::arg("node_0"), py::arg("node_1")) + .def( + "get_adjacent_nodes", &Architecture::get_neighbour_nodes, + "given a node, returns adjacent nodes in Architecture.", + py::arg("node")) + .def_property_readonly( + "nodes", &Architecture::get_all_nodes_vec, + "Returns all nodes of architecture as Node objects.") + .def_property_readonly( + "coupling", &Architecture::get_all_edges_vec, + "Returns the coupling map of the Architecture as " + "UnitIDs. ") + .def( + "to_dict", [](const Architecture &arch) { return json(arch); }, + "Return a JSON serializable dict representation of " + "the Architecture.\n" + ":return: dict containing nodes and links.") + .def_static( + "from_dict", [](const json &j) { return j.get(); }, + "Construct Architecture instance from JSON serializable " + "dict representation of the Architecture.") + // as far as Python is concerned, Architectures are immutable + .def( + "__deepcopy__", + [](const Architecture &arc, py::dict = py::dict()) { return arc; }) + .def( + "__repr__", + [](const Architecture &arc) { + return ""; + }) + .def(py::self == py::self); + py::class_, Architecture>( + m, "SquareGrid", + "Architecture class for qubits arranged in a square lattice of " + "given number of rows and columns. Qubits are arranged with qubits " + "values increasing first along rows then along columns i.e. 
for a " + "3 x 3 grid:\n\n 0 1 2\n\n 3 4 5\n\n 6 7 8") + .def( + py::init(), + "The constructor for a Square Grid architecture with some " + "undirected connectivity between qubits.\n\n:param n_rows: " + "The number of rows in the grid\n:param n_columns: The number " + "of columns in the grid", + py::arg("n_rows"), py::arg("n_columns")) + .def( + py::init(), + "The constructor for a Square Grid architecture with some " + "undirected connectivity between qubits.\n\n:param n_rows: " + "The number of rows in the grid\n:param n_columns: The number " + "of columns in the grid\n:param n_layers: The number of " + "layers of grids", + py::arg("n_rows"), py::arg("n_columns"), py::arg("n_layers")) + .def( + "squind_to_qind", + [](const SquareGrid &self, const unsigned row, const unsigned col) { + return self.squind_to_qind(row, col); + }, + "Converts a (row,column) index for a square grid to a " + "single " + "qubit index\n\n:param row: The given row index\n:param " + "column: The given column index\n:return: the " + "corresponding " + "global qubit index", + py::arg("row"), py::arg("column")) + .def( + "qind_to_squind", &SquareGrid::qind_to_squind, + "Converts a single qubit index to a (row,column) index for a " + "square grid.\n\n:param index: The global qubit " + "index\n:return: the corresponding grid index as a pair " + "(row,column)", + py::arg("index")) + // as far as Python is concerned, Architectures are immutable + .def( + "__deepcopy__", + [](const SquareGrid &arc, py::dict = py::dict()) { return arc; }) + .def("__repr__", [](const SquareGrid &arc) { + return ""; + }); + py::class_, Architecture>( + m, "RingArch", + "Architecture class for number of qubits arranged in a ring.") + .def( + py::init(), + "The constructor for a RingArchitecture with some undirected " + "connectivity between qubits.\n\n:param number of qubits", + py::arg("nodes")) + .def("__repr__", [](const RingArch &arc) { + return ""; + }); + py::class_( + m, "FullyConnected", + "An architecture with full connectivity between qubits.") + .def( + py::init(), + "Construct a fully-connected architecture." + "\n\n:param n: number of qubits", + py::arg("n")) + .def( + "__repr__", + [](const FullyConnected &arc) { + return ""; + }) + .def(py::self == py::self) + .def_property_readonly( + "nodes", &FullyConnected::get_all_nodes_vec, + "All nodes of the architecture as :py:class:`Node` objects.") + .def( + "to_dict", [](const FullyConnected &arch) { return json(arch); }, + "JSON-serializable dict representation of the architecture." + "\n\n:return: dict containing nodes") + .def_static( + "from_dict", [](const json &j) { return j.get(); }, + "Construct FullyConnected instance from dict representation."); +} +} // namespace tket diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index 5e8cee5dcc..f8d07e75b0 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -10,13 +10,15 @@ namespace py = pybind11; namespace tket { PYBIND11_MODULE(mapping, m) { - py::class_( + py::class_>( m, "RoutingMethod", "Parent class for RoutingMethod, for inheritance purposes only, not for " "usage.") .def(py::init<>()); - py::class_( + py::class_< + RoutingMethodCircuit, std::shared_ptr, + RoutingMethod>( m, "RoutingMethodCircuit", "The RoutingMethod class captures a method for partially mapping logical" "subcircuits to physical operations as permitted by some architecture. 
" @@ -43,7 +45,9 @@ PYBIND11_MODULE(mapping, m) { py::arg("route_subcircuit"), py::arg("check_subcircuit"), py::arg("max_size"), py::arg("max_depth")); - py::class_( + py::class_< + LexiRouteRoutingMethod, std::shared_ptr, + RoutingMethod>( m, "LexiRouteRoutingMethod", "Defines a RoutingMethod object for mapping circuits that uses the " "Lexicographical Comparison approach outlined in arXiv:1902.08091.") diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index 0e1e87dcf2..81f5e01efd 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -15,6 +15,7 @@ #include #include "ArchAwareSynth/SteinerForest.hpp" +#include "Mapping/RoutingMethod.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/PassLibrary.hpp" @@ -28,34 +29,28 @@ using json = nlohmann::json; namespace tket { -void update_routing_config(RoutingConfig &config, py::kwargs kwargs) { - if (kwargs.contains("swap_lookahead")) - config.depth_limit = py::cast(kwargs["swap_lookahead"]); - if (kwargs.contains("bridge_lookahead")) - config.distrib_limit = py::cast(kwargs["bridge_lookahead"]); - if (kwargs.contains("bridge_interactions")) - config.interactions_limit = - py::cast(kwargs["bridge_interactions"]); - if (kwargs.contains("bridge_exponent")) - config.distrib_exponent = py::cast(kwargs["bridge_exponent"]); -} static PassPtr gen_cx_mapping_pass_kwargs( const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { - RoutingConfig config = {}; - update_routing_config(config, kwargs); + RoutingMethodPtr method = std::make_shared(100); + std::vector config = {method}; + + if (kwargs.contains("config")) { + config = py::cast>(kwargs["config"]); + } bool directed_cx = false; - if (kwargs.contains("directed_cx")) + if (kwargs.contains("directed_cx")) { directed_cx = py::cast(kwargs["directed_cx"]); + } bool delay_measures = true; - if (kwargs.contains("delay_measures")) + if (kwargs.contains("delay_measures")) { delay_measures = py::cast(kwargs["delay_measures"]); + } return gen_cx_mapping_pass(arc, placer, config, directed_cx, delay_measures); } -static PassPtr gen_default_routing_pass( - const Architecture &arc, py::kwargs kwargs) { - RoutingConfig config = {}; - update_routing_config(config, kwargs); +static PassPtr gen_default_routing_pass(const Architecture &arc) { + RoutingMethodPtr method = std::make_shared(100); + std::vector config = {method}; return gen_routing_pass(arc, config); } @@ -78,13 +73,6 @@ static PassPtr gen_default_aas_routing_pass( return gen_full_mapping_pass_phase_poly(arc, lookahead, cnotsynthtype); } -static PassPtr gen_full_mapping_pass_kwargs( - const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { - RoutingConfig config = {}; - update_routing_config(config, kwargs); - return gen_full_mapping_pass(arc, placer, config); -} - static const py::module &decompose_module() { static const py::module decomposer_ = py::module::import("pytket.circuit.decompose_classical"); @@ -503,10 +491,6 @@ PYBIND11_MODULE(passes, m) { "RoutingPass", &gen_default_routing_pass, "Construct a pass to route to the connectivity graph of an " ":py:class:`Architecture`. Edge direction is ignored." - "\n\n:param arc: The architecture to use for connectivity information." - "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0." 
"\n:return: a pass that routes to the given device architecture", py::arg("arc")); @@ -523,18 +507,18 @@ PYBIND11_MODULE(passes, m) { py::arg("qubit_map")); m.def( - "FullMappingPass", &gen_full_mapping_pass_kwargs, + "FullMappingPass", &gen_full_mapping_pass, "Construct a pass to relabel :py:class:`Circuit` Qubits to " ":py:class:`Architecture` Nodes, and then route to the connectivity " "graph " "of an :py:class:`Architecture`. Edge direction is ignored." "\n\n:param arc: The architecture to use for connectivity information. " "\n:param placer: The Placement used for relabelling." - "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0." + "\n:param config: Parameters for routing, a " + " list of RoutingMethod, each method is checked" + " and run if applicable in turn." "\n:return: a pass to perform the remapping", - py::arg("arc"), py::arg("placer")); + py::arg("arc"), py::arg("placer"), py::arg("config")); m.def( "DefaultMappingPass", &gen_default_mapping_pass, @@ -580,8 +564,6 @@ PYBIND11_MODULE(passes, m) { "\n\n:param arc: The Architecture used for connectivity information." "\n:param placer: The placement used for relabelling." "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0, " "(bool)directed_cx=false, (bool)delay_measures=true" "\n:return: a pass to perform the remapping", py::arg("arc"), py::arg("placer")); diff --git a/pytket/binders/placement.cpp b/pytket/binders/placement.cpp new file mode 100644 index 0000000000..582c098264 --- /dev/null +++ b/pytket/binders/placement.cpp @@ -0,0 +1,219 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Routing/Placement.hpp" + +#include +#include +#include +#include + +#include "Utils/Json.hpp" +#include "binder_json.hpp" +#include "binder_utils.hpp" +#include "typecast.hpp" + +namespace py = pybind11; +using json = nlohmann::json; + +namespace tket { + +// definitely a better way of doing this ... 
+void amend_config_from_kwargs(NoiseAwarePlacement &pobj, py::kwargs kwargs) { + PlacementConfig config_ = pobj.get_config(); + + if (kwargs.contains("depth_limit")) + config_.depth_limit = py::cast(kwargs["depth_limit"]); + if (kwargs.contains("max_interaction_edges")) + config_.max_interaction_edges = + py::cast(kwargs["max_interaction_edges"]); + if (kwargs.contains("max_matches")) + config_.vf2_max_matches = py::cast(kwargs["max_matches"]); + if (kwargs.contains("contraction_ratio")) + config_.arc_contraction_ratio = + py::cast(kwargs["contraction_ratio"]); + if (kwargs.contains("timeout")) + config_.timeout = py::cast(kwargs["timeout"]); + + pobj.set_config(config_); +} +void amend_config_from_kwargs(GraphPlacement &pobj, py::kwargs kwargs) { + PlacementConfig config_ = pobj.get_config(); + + if (kwargs.contains("depth_limit")) + config_.depth_limit = py::cast(kwargs["depth_limit"]); + if (kwargs.contains("max_interaction_edges")) + config_.max_interaction_edges = + py::cast(kwargs["max_interaction_edges"]); + if (kwargs.contains("max_matches")) + config_.vf2_max_matches = py::cast(kwargs["max_matches"]); + if (kwargs.contains("contraction_ratio")) + config_.arc_contraction_ratio = + py::cast(kwargs["contraction_ratio"]); + if (kwargs.contains("timeout")) + config_.timeout = py::cast(kwargs["timeout"]); + pobj.set_config(config_); +} + +void place_with_map(Circuit &circ, qubit_mapping_t &qmap) { + Architecture arc; + Placement plobj(arc); + plobj.place_with_map(circ, qmap); +} + +PYBIND11_MODULE(placement, m) { + py::class_>( + m, "Placement", + "The base Placement class, contains methods for getting maps " + "between Circuit Qubits and Architecture Nodes and for relabelling " + "Circuit Qubits.") + + .def(py::init(), + "The constructor for a Placement object. The Architecture object " + "describes the connectivity between " + "qubits.\n\n:param arc: An Architecture object.", + py::arg("arc")) + .def("__repr__", + [](const Placement &) { return ""; }) + .def("place", &Placement::place, + "Relabels Circuit Qubits to Architecture Nodes and 'unplaced'. For " + "base Placement, all Qubits and labelled 'unplaced'. " + "\n\n:param circuit: The Circuit being relabelled.", + py::arg("circuit")) + .def_static( + "place_with_map", &Placement::place_with_map, + "Relabels Circuit Qubits to Architecture Nodes using given map. " + "\n\n:param circuit: The circuit being relabelled\n:param " + "qmap: The map from logical to physical qubits to apply.", + py::arg("circuit"), py::arg("qmap")) + .def("get_placement_map", &Placement::get_placement_map, + "Returns a map from logical to physical qubits that is Architecture " + "appropriate for the given Circuit. " + "\n\n:param circuit: The circuit a map is designed for." + "\n:return: dictionary mapping " CLSOBJS(Qubit) " to " + CLSOBJS(Node), + py::arg("circuit")) + .def("get_placement_maps", &Placement::get_all_placement_maps, + "Returns a list of maps from logical to physical qubits that " + "are Architecture appropriate for the given Circuit. Each map is " + "estimated to given a similar SWAP overheard after routing. " + "\n\n:param circuit: The circuit the maps are designed for." 
+ "\n:return: list of dictionaries mapping " CLSOBJS(Qubit) " " + "to " CLSOBJS(Node), + py::arg("circuit")) + .def( + "to_dict", [](const PlacementPtr &placement) { return json(placement); }, + "Return a JSON serializable dict representation of " + "the Placement.\n" + ":return: dict representing the Placement.") + .def_static( + "from_dict", [](const json &j) { return j.get(); }, + "Construct Placement instance from JSON serializable " + "dict representation of the Placement."); + + py::class_, Placement>( + m, "LinePlacement", + "The LinePlacement class, contains methods for getting maps " + "between Circuit Qubits and Architecture Nodes and for relabelling " + "Circuit Qubits.") + .def( + py::init(), + "The constructor for a LinePlacement object. The Architecture " + "object describes the connectivity " + "between qubits.\n\n:param arc: An Architecture object.", + py::arg("arc")) + .def("__repr__", [](const Placement &) { + return ""; + }); + + py::class_, Placement>( + m, "GraphPlacement", + "The GraphPlacement class, contains methods for getting maps " + "between Circuit Qubits and Architecture Nodes and for relabelling " + "Circuit Qubits.") + .def( + py::init(), + "The constructor for a GraphPlacement object. The Architecture " + "object describes the connectivity " + "between qubits.\n\n:param arc: An Architecture object.", + py::arg("arc")) + .def( + "__repr__", + [](const Placement &) { return ""; }) + .def( + "modify_config", + [](GraphPlacement &pobj, py::kwargs kwargs) { + amend_config_from_kwargs(pobj, kwargs); + }, + "Overides default Placement parameters to given values. Timeout is " + "in milliseconds" + "\n:param \\**kwargs: Parameters for placement: " + "(int)depth_limit=5, (int)max_interaction_edges=edges in " + "the " + "device graph, (int)max_matches=10000, " + "(int)contraction_ratio=10, (int)timeout=60000."); + + py::class_< + NoiseAwarePlacement, std::shared_ptr, Placement>( + m, "NoiseAwarePlacement", + "The NoiseAwarePlacement class, contains methods for getting maps " + "between Circuit Qubits and Architecture Nodes and for relabelling " + "Circuit Qubits. It uses gate error rates and readout errors " + "to find the best placement map.") + .def( + py::init< + Architecture &, avg_node_errors_t, avg_link_errors_t, + avg_readout_errors_t>(), + "The constructor for a NoiseAwarePlacement object. The Architecture " + "object describes the connectivity between qubits. " + "The dictionaries passed as parameters indicate the average " + "gate errors " + "for single- and two-qubit gates as well as readout errors. " + "If no error is given for a given node or pair of nodes, the " + "fidelity is assumed to be 1." + "\n\n:param arc: An Architecture object\n" + ":param node_errors: a dictionary mapping nodes in the " + "architecture to average single-qubit gate errors\n" + ":param link_errors: a dictionary mapping pairs of nodes in the " + "architecture to average two-qubit gate errors\n" + ":param readout_errors: a dictionary mapping nodes in the " + "architecture to average measurement readout errors.", + py::arg("arc"), py::arg("node_errors") = py::dict(), + py::arg("link_errors") = py::dict(), + py::arg("readout_errors") = py::dict()) + .def( + "__repr__", + [](const Placement &) { return ""; }) + .def( + "modify_config", + [](NoiseAwarePlacement &pobj, py::kwargs kwargs) { + amend_config_from_kwargs(pobj, kwargs); + }, + "Overides default Placement parameters to given values. 
Timeout is " + "in milliseconds" + "\n:param \\**kwargs: Parameters for placement: " + "(int)depth_limit=5, (int)max_interaction_edges=edges in " + "the " + "device graph, (int)max_matches=10000, " + "(int)contraction_ratio=10, (int)timeout=60000."); + + m.def( + "place_with_map", &place_with_map, + "Relabels Circuit Qubits according to a map. If provided map " + "is partial, remaining Circuit Qubits are left 'unplaced'. " + "\n\n:param circuit: The Circuit being relabelled. \n:param qmap: " + "The map from logical to physical qubits to apply.", + py::arg("circuit"), py::arg("qmap")); +} +} // namespace tket diff --git a/pytket/binders/routing.cpp b/pytket/binders/routing.cpp index 281f6a7fcf..66707b342a 100644 --- a/pytket/binders/routing.cpp +++ b/pytket/binders/routing.cpp @@ -32,49 +32,6 @@ using json = nlohmann::json; namespace tket { -// definitely a better way of doing this ... -void amend_config_from_kwargs(NoiseAwarePlacement &pobj, py::kwargs kwargs) { - PlacementConfig config_ = pobj.get_config(); - - if (kwargs.contains("depth_limit")) - config_.depth_limit = py::cast(kwargs["depth_limit"]); - if (kwargs.contains("max_interaction_edges")) - config_.max_interaction_edges = - py::cast(kwargs["max_interaction_edges"]); - if (kwargs.contains("max_matches")) - config_.vf2_max_matches = py::cast(kwargs["max_matches"]); - if (kwargs.contains("contraction_ratio")) - config_.arc_contraction_ratio = - py::cast(kwargs["contraction_ratio"]); - if (kwargs.contains("timeout")) - config_.timeout = py::cast(kwargs["timeout"]); - - pobj.set_config(config_); -} -void amend_config_from_kwargs(GraphPlacement &pobj, py::kwargs kwargs) { - PlacementConfig config_ = pobj.get_config(); - - if (kwargs.contains("depth_limit")) - config_.depth_limit = py::cast(kwargs["depth_limit"]); - if (kwargs.contains("max_interaction_edges")) - config_.max_interaction_edges = - py::cast(kwargs["max_interaction_edges"]); - if (kwargs.contains("max_matches")) - config_.vf2_max_matches = py::cast(kwargs["max_matches"]); - if (kwargs.contains("contraction_ratio")) - config_.arc_contraction_ratio = - py::cast(kwargs["contraction_ratio"]); - if (kwargs.contains("timeout")) - config_.timeout = py::cast(kwargs["timeout"]); - pobj.set_config(config_); -} - -void place_with_map(Circuit &circ, qubit_mapping_t &qmap) { - Architecture arc; - Placement plobj(arc); - plobj.place_with_map(circ, qmap); -} - std::pair route( const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { RoutingConfig config = {}; @@ -94,302 +51,6 @@ std::pair route( } PYBIND11_MODULE(routing, m) { - py::class_>( - m, "NodeGraph", - "Abstract class for describing a device connectivity graph."); - - py::class_>( - m, "Architecture", - "Class describing the connectivity of qubits on a general device.") - .def( - py::init([](const std::vector> - &connections) { return Architecture(connections); }), - "The constructor for an architecture with connectivity " - "between qubits.\n\n:param connections: A list of pairs " - "representing qubit indices that can perform two-qubit " - "operations", - py::arg("connections")) - .def( - py::init> &>(), - "The constructor for an architecture with connectivity " - "between qubits.\n\n:param connections: A list of pairs " - "representing Nodes that can perform two-qubit operations", - py::arg("connections")) - .def( - "__repr__", - [](const Architecture &arc) { - return ""; - }) - .def( - "get_distance", &Architecture::get_distance, - "given two nodes in Architecture, " - "returns distance between them", - 
py::arg("node_0"), py::arg("node_1")) - .def( - "get_adjacent_nodes", &Architecture::get_neighbour_nodes, - "given a node, returns adjacent nodes in Architecture.", - py::arg("node")) - .def_property_readonly( - "nodes", &Architecture::get_all_nodes_vec, - "Returns all nodes of architecture as Node objects.") - .def_property_readonly( - "coupling", &Architecture::get_all_edges_vec, - "Returns the coupling map of the Architecture as " - "UnitIDs. ") - .def( - "to_dict", [](const Architecture &arch) { return json(arch); }, - "Return a JSON serializable dict representation of " - "the Architecture.\n" - ":return: dict containing nodes and links.") - .def_static( - "from_dict", [](const json &j) { return j.get(); }, - "Construct Architecture instance from JSON serializable " - "dict representation of the Architecture.") - // as far as Python is concerned, Architectures are immutable - .def( - "__deepcopy__", - [](const Architecture &arc, py::dict = py::dict()) { return arc; }) - .def( - "__repr__", - [](const Architecture &arc) { - return ""; - }) - .def(py::self == py::self); - py::class_< - SquareGrid, std::shared_ptr, Architecture, - graphs::AbstractGraph>( - m, "SquareGrid", - "Architecture class for qubits arranged in a square lattice of " - "given number of rows and columns. Qubits are arranged with qubits " - "values increasing first along rows then along columns i.e. for a " - "3 x 3 grid:\n\n 0 1 2\n\n 3 4 5\n\n 6 7 8") - .def( - py::init(), - "The constructor for a Square Grid architecture with some " - "undirected connectivity between qubits.\n\n:param n_rows: " - "The number of rows in the grid\n:param n_columns: The number " - "of columns in the grid", - py::arg("n_rows"), py::arg("n_columns")) - .def( - py::init(), - "The constructor for a Square Grid architecture with some " - "undirected connectivity between qubits.\n\n:param n_rows: " - "The number of rows in the grid\n:param n_columns: The number " - "of columns in the grid\n:param n_layers: The number of " - "layers of grids", - py::arg("n_rows"), py::arg("n_columns"), py::arg("n_layers")) - .def( - "squind_to_qind", - [](const SquareGrid &self, const unsigned row, const unsigned col) { - return self.squind_to_qind(row, col); - }, - "Converts a (row,column) index for a square grid to a " - "single " - "qubit index\n\n:param row: The given row index\n:param " - "column: The given column index\n:return: the " - "corresponding " - "global qubit index", - py::arg("row"), py::arg("column")) - .def( - "qind_to_squind", &SquareGrid::qind_to_squind, - "Converts a single qubit index to a (row,column) index for a " - "square grid.\n\n:param index: The global qubit " - "index\n:return: the corresponding grid index as a pair " - "(row,column)", - py::arg("index")) - // as far as Python is concerned, Architectures are immutable - .def( - "__deepcopy__", - [](const SquareGrid &arc, py::dict = py::dict()) { return arc; }) - .def("__repr__", [](const SquareGrid &arc) { - return ""; - }); - py::class_< - RingArch, std::shared_ptr, Architecture, - graphs::AbstractGraph>( - m, "RingArch", - "Architecture class for number of qubits arranged in a ring.") - .def( - py::init(), - "The constructor for a RingArchitecture with some undirected " - "connectivity between qubits.\n\n:param number of qubits", - py::arg("nodes")) - .def("__repr__", [](const RingArch &arc) { - return ""; - }); - py::class_>( - m, "FullyConnected", - "An architecture with full connectivity between qubits.") - .def( - py::init(), - "Construct a fully-connected architecture." 
- "\n\n:param n: number of qubits", - py::arg("n")) - .def( - "__repr__", - [](const FullyConnected &arc) { - return ""; - }) - .def(py::self == py::self) - .def_property_readonly( - "nodes", &FullyConnected::get_all_nodes_vec, - "All nodes of the architecture as :py:class:`Node` objects.") - .def( - "to_dict", [](const FullyConnected &arch) { return json(arch); }, - "JSON-serializable dict representation of the architecture." - "\n\n:return: dict containing nodes") - .def_static( - "from_dict", [](const json &j) { return j.get(); }, - "Construct FullyConnected instance from dict representation."); - - py::class_>( - m, "Placement", - "The base Placement class, contains methods for getting maps " - "between Circuit Qubits and Architecture Nodes and for relabelling " - "Circuit Qubits.") - - .def(py::init(), - "The constructor for a Placement object. The Architecture object " - "describes the connectivity between " - "qubits.\n\n:param arc: An Architecture object.", - py::arg("arc")) - .def("__repr__", - [](const Placement &) { return ""; }) - .def("place", &Placement::place, - "Relabels Circuit Qubits to Architecture Nodes and 'unplaced'. For " - "base Placement, all Qubits and labelled 'unplaced'. " - "\n\n:param circuit: The Circuit being relabelled.", - py::arg("circuit")) - .def_static( - "place_with_map", &Placement::place_with_map, - "Relabels Circuit Qubits to Architecture Nodes using given map. " - "\n\n:param circuit: The circuit being relabelled\n:param " - "qmap: The map from logical to physical qubits to apply.", - py::arg("circuit"), py::arg("qmap")) - .def("get_placement_map", &Placement::get_placement_map, - "Returns a map from logical to physical qubits that is Architecture " - "appropriate for the given Circuit. " - "\n\n:param circuit: The circuit a map is designed for." - "\n:return: dictionary mapping " CLSOBJS(Qubit) " to " - CLSOBJS(Node), - py::arg("circuit")) - .def("get_placement_maps", &Placement::get_all_placement_maps, - "Returns a list of maps from logical to physical qubits that " - "are Architecture appropriate for the given Circuit. Each map is " - "estimated to given a similar SWAP overheard after routing. " - "\n\n:param circuit: The circuit the maps are designed for." - "\n:return: list of dictionaries mapping " CLSOBJS(Qubit) " " - "to " CLSOBJS(Node), - py::arg("circuit")) - .def( - "to_dict", [](const PlacementPtr &placement) { return json(placement); }, - "Return a JSON serializable dict representation of " - "the Placement.\n" - ":return: dict representing the Placement.") - .def_static( - "from_dict", [](const json &j) { return j.get(); }, - "Construct Placement instance from JSON serializable " - "dict representation of the Placement."); - - py::class_, Placement>( - m, "LinePlacement", - "The LinePlacement class, contains methods for getting maps " - "between Circuit Qubits and Architecture Nodes and for relabelling " - "Circuit Qubits.") - .def( - py::init(), - "The constructor for a LinePlacement object. The Architecture " - "object describes the connectivity " - "between qubits.\n\n:param arc: An Architecture object.", - py::arg("arc")) - .def("__repr__", [](const Placement &) { - return ""; - }); - - py::class_, Placement>( - m, "GraphPlacement", - "The GraphPlacement class, contains methods for getting maps " - "between Circuit Qubits and Architecture Nodes and for relabelling " - "Circuit Qubits.") - .def( - py::init(), - "The constructor for a GraphPlacement object. 
The Architecture " - "object describes the connectivity " - "between qubits.\n\n:param arc: An Architecture object.", - py::arg("arc")) - .def( - "__repr__", - [](const Placement &) { return ""; }) - .def( - "modify_config", - [](GraphPlacement &pobj, py::kwargs kwargs) { - amend_config_from_kwargs(pobj, kwargs); - }, - "Overides default Placement parameters to given values. Timeout is " - "in milliseconds" - "\n:param \\**kwargs: Parameters for placement: " - "(int)depth_limit=5, (int)max_interaction_edges=edges in " - "the " - "device graph, (int)max_matches=10000, " - "(int)contraction_ratio=10, (int)timeout=60000."); - - py::class_< - NoiseAwarePlacement, std::shared_ptr, Placement>( - m, "NoiseAwarePlacement", - "The NoiseAwarePlacement class, contains methods for getting maps " - "between Circuit Qubits and Architecture Nodes and for relabelling " - "Circuit Qubits. It uses gate error rates and readout errors " - "to find the best placement map.") - .def( - py::init< - Architecture &, avg_node_errors_t, avg_link_errors_t, - avg_readout_errors_t>(), - "The constructor for a NoiseAwarePlacement object. The Architecture " - "object describes the connectivity between qubits. " - "The dictionaries passed as parameters indicate the average " - "gate errors " - "for single- and two-qubit gates as well as readout errors. " - "If no error is given for a given node or pair of nodes, the " - "fidelity is assumed to be 1." - "\n\n:param arc: An Architecture object\n" - ":param node_errors: a dictionary mapping nodes in the " - "architecture to average single-qubit gate errors\n" - ":param link_errors: a dictionary mapping pairs of nodes in the " - "architecture to average two-qubit gate errors\n" - ":param readout_errors: a dictionary mapping nodes in the " - "architecture to average measurement readout errors.", - py::arg("arc"), py::arg("node_errors") = py::dict(), - py::arg("link_errors") = py::dict(), - py::arg("readout_errors") = py::dict()) - .def( - "__repr__", - [](const Placement &) { return ""; }) - .def( - "modify_config", - [](NoiseAwarePlacement &pobj, py::kwargs kwargs) { - amend_config_from_kwargs(pobj, kwargs); - }, - "Overides default Placement parameters to given values. Timeout is " - "in milliseconds" - "\n:param \\**kwargs: Parameters for placement: " - "(int)depth_limit=5, (int)max_interaction_edges=edges in " - "the " - "device graph, (int)max_matches=10000, " - "(int)contraction_ratio=10, (int)timeout=60000."); - - m.def( - "place_with_map", &place_with_map, - "Relabels Circuit Qubits according to a map. If provided map " - "is partial, remaining Circuit Qubits are left 'unplaced'. " - "\n\n:param circuit: The Circuit being relabelled. \n:param qmap: " - "The map from logical to physical qubits to apply.", - py::arg("circuit"), py::arg("qmap")); - m.def( "route", [](const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { diff --git a/pytket/docs/changelog.rst b/pytket/docs/changelog.rst index 7f08a9c096..f917bd759c 100644 --- a/pytket/docs/changelog.rst +++ b/pytket/docs/changelog.rst @@ -6,7 +6,6 @@ Changelog Minor new features: -* Add `NodeGraph` as abstract base class for device connectivity graphs. * Improved CnX gate decomposition. 
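A minimal sketch of how client code uses the Architecture, SquareGrid and Placement bindings defined above once they live in the new pytket.architecture and pytket.placement modules introduced later in this patch. Constructor and method names are taken from the docstrings and test changes in this patch; the assumption that the new modules re-export exactly the same names as the old pytket.routing is mine.

from pytket import Circuit
from pytket.architecture import Architecture, SquareGrid
from pytket.placement import GraphPlacement, place_with_map

circ = Circuit(4)
circ.CX(0, 1).CX(1, 2).CX(2, 3).CX(3, 0)

arc = Architecture([(0, 1), (1, 2), (2, 3)])  # undirected coupling list
placer = GraphPlacement(arc)
qmap = placer.get_placement_map(circ)         # dict mapping Qubit -> Node
place_with_map(circ, qmap)                    # relabel circuit qubits in place

grid = SquareGrid(3, 3)                       # 3 x 3 lattice, qubit indices 0..8
row_col = grid.qind_to_squind(5)              # single index -> (row, column)
assert grid.squind_to_qind(*row_col) == 5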
0.17.0 (November 2021) diff --git a/pytket/docs/conf.py b/pytket/docs/conf.py index 22bf398b15..1539c99b04 100644 --- a/pytket/docs/conf.py +++ b/pytket/docs/conf.py @@ -215,7 +215,8 @@ "pytket.backends.backend.Backend": "pytket.backends.Backend", "tket::Predicate": "pytket._tket.predicates.Predicate", "tket::Qubit": "pytket._tket.circuit.Qubit", - "tket::Architecture": "pytket._tket.routing.Architecture", + "tket::Architecture": "pytket._tket.architecture.Architecture", + "tket::RoutingMethod": "pytket._tket.mapping.RoutingMethod", "tket::CircBox": "pytket._tket.circuit.CircBox", "tket::ExpBox": "pytket._tket.circuit.ExpBox", "tket::QControlBox": "pytket._tket.circuit.QControlBox", diff --git a/pytket/pytket/__init__.py b/pytket/pytket/__init__.py index 6bac4d5143..c414c351e1 100755 --- a/pytket/pytket/__init__.py +++ b/pytket/pytket/__init__.py @@ -21,6 +21,8 @@ Bit, ) import pytket.routing +import pytket.architecture +import pytket.placement import pytket.transform import pytket.telemetry diff --git a/pytket/pytket/architecture/__init__.py b/pytket/pytket/architecture/__init__.py new file mode 100644 index 0000000000..41556524e2 --- /dev/null +++ b/pytket/pytket/architecture/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019-2021 Cambridge Quantum Computing +# +# You may not use this file except in compliance with the Licence. +# You may obtain a copy of the Licence in the LICENCE file accompanying +# these documents or at: +# +# https://cqcl.github.io/pytket/build/html/licence.html + +"""The architecture module provides an API to interact with the + tket ::py:class:'Architecture' class, which for some set of identified physical qubits, + defines which can run two-qubit gates between them. This module is provided in binary + form during the PyPI installation.""" + +from pytket._tket.architecture import * # type: ignore diff --git a/pytket/pytket/backends/backendinfo.py b/pytket/pytket/backends/backendinfo.py index a29511fc22..8c77753dfd 100644 --- a/pytket/pytket/backends/backendinfo.py +++ b/pytket/pytket/backends/backendinfo.py @@ -16,7 +16,7 @@ from dataclasses import dataclass, field, asdict from typing import Any, Dict, List, Optional, Set, cast, Tuple, Union -from pytket.routing import Architecture, FullyConnected # type: ignore +from pytket.architecture import Architecture, FullyConnected # type: ignore from pytket.circuit import Node, OpType # type: ignore diff --git a/pytket/pytket/placement/__init__.py b/pytket/pytket/placement/__init__.py new file mode 100644 index 0000000000..f2c2a3cce4 --- /dev/null +++ b/pytket/pytket/placement/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2019-2021 Cambridge Quantum Computing +# +# You may not use this file except in compliance with the Licence. +# You may obtain a copy of the Licence in the LICENCE file accompanying +# these documents or at: +# +# https://cqcl.github.io/pytket/build/html/licence.html + +"""The placement module provides an API to interact with the many + tket ::py:class:'Placement' options, providing methods for relabelling + logical circuit qubit identifiers to physical architecture node identifiers, + for the purpose of compilation. 
This module is provided in binary form during the + PyPI installation.""" + +from pytket._tket.placement import * # type: ignore diff --git a/pytket/setup.py b/pytket/setup.py index 1cd0f5f49c..1cf3a226ee 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -193,10 +193,12 @@ def build_extension(self, ext): "pauli", "program", "routing", + "mapping", "transform", "tailoring", - "mapping", "zx", + "placement", + "architecture", ] diff --git a/pytket/tests/backend_test.py b/pytket/tests/backend_test.py index 1ef850fb42..85bf4ff0b9 100644 --- a/pytket/tests/backend_test.py +++ b/pytket/tests/backend_test.py @@ -24,7 +24,8 @@ from pytket.circuit import Circuit, OpType, BasisOrder, Qubit, Bit, Node # type: ignore from pytket.predicates import CompilationUnit # type: ignore from pytket.passes import PauliSimp, CliffordSimp, ContextSimp # type: ignore -from pytket.routing import Architecture, route # type: ignore +from pytket.routing import route # type: ignore +from pytket.architecture import Architecture # type: ignore from pytket.utils.outcomearray import OutcomeArray, readout_counts from pytket.utils.prepare import prepare_circuit from pytket.backends import CircuitNotValidError diff --git a/pytket/tests/backendinfo_test.py b/pytket/tests/backendinfo_test.py index 1f48a61cf7..4eee382ab1 100644 --- a/pytket/tests/backendinfo_test.py +++ b/pytket/tests/backendinfo_test.py @@ -23,7 +23,7 @@ import pytest # type: ignore from pytket.backends.backendinfo import BackendInfo, fully_connected_backendinfo -from pytket.routing import SquareGrid, FullyConnected # type: ignore +from pytket.architecture import SquareGrid, FullyConnected # type: ignore from pytket.circuit import OpType, Node # type: ignore import strategies as st # type: ignore diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index c4e670b2a5..6ecacd633b 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -7,7 +7,7 @@ # https://cqcl.github.io/pytket/build/html/licence.html from pytket.mapping import MappingManager, RoutingMethodCircuit, LexiRouteRoutingMethod # type: ignore -from pytket.routing import Architecture # type: ignore +from pytket.architecture import Architecture # type: ignore from pytket import Circuit, OpType from pytket.circuit import Node # type: ignore from typing import Tuple, Dict diff --git a/pytket/tests/mitigation_test.py b/pytket/tests/mitigation_test.py index 31b69bc956..00220dff8f 100644 --- a/pytket/tests/mitigation_test.py +++ b/pytket/tests/mitigation_test.py @@ -17,7 +17,8 @@ from pytket.utils.spam import SpamCorrecter, compress_counts from pytket.circuit import Node, Circuit # type: ignore -from pytket.routing import Architecture, route # type: ignore +from pytket.routing import route # type: ignore +from pytket.architecture import Architecture # type: ignore from pytket.passes import DelayMeasures # type: ignore from typing import List, Dict, Counter, Tuple from pytket.utils.outcomearray import OutcomeArray diff --git a/pytket/tests/predicates_test.py b/pytket/tests/predicates_test.py index 5a70ae847e..d75bc34b3f 100644 --- a/pytket/tests/predicates_test.py +++ b/pytket/tests/predicates_test.py @@ -74,7 +74,9 @@ CompilationUnit, UserDefinedPredicate, ) -from pytket.routing import Architecture, Placement, GraphPlacement # type: ignore +from pytket.mapping import LexiRouteRoutingMethod # type: ignore +from pytket.architecture import Architecture # type: ignore +from pytket.placement import Placement, GraphPlacement # type: ignore from pytket.transform import 
Transform, PauliSynthStrat, CXConfigType # type: ignore from pytket._tket.passes import SynthesiseOQC # type: ignore import numpy as np @@ -212,7 +214,7 @@ def test_routing_and_placement_pass() -> None: assert seq_pass.apply(cu2) assert cu2.initial_map == expected_map - full_pass = FullMappingPass(arc, pl) + full_pass = FullMappingPass(arc, pl, config=[LexiRouteRoutingMethod(100)]) cu3 = CompilationUnit(circ.copy()) assert full_pass.apply(cu3) assert cu3.initial_map == expected_map @@ -638,12 +640,8 @@ def sq(a: float, b: float, c: float) -> Circuit: assert euler_pass.to_dict()["StandardPass"]["euler_p"] == "Rx" # RoutingPass arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - r_pass = RoutingPass(arc, swap_lookahead=10, bridge_interactions=10) + r_pass = RoutingPass(arc) assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" - assert r_pass.to_dict()["StandardPass"]["routing_config"]["depth_limit"] == 10 - assert ( - r_pass.to_dict()["StandardPass"]["routing_config"]["interactions_limit"] == 10 - ) assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) # PlacementPass placer = GraphPlacement(arc) @@ -659,7 +657,7 @@ def sq(a: float, b: float, c: float) -> Circuit: [k.to_list(), v.to_list()] for k, v in qm.items() ] # FullMappingPass - fm_pass = FullMappingPass(arc, placer) + fm_pass = FullMappingPass(arc, placer, config=[LexiRouteRoutingMethod(100)]) assert fm_pass.to_dict()["pass_class"] == "SequencePass" p_pass = fm_pass.get_sequence()[0] r_pass = fm_pass.get_sequence()[1] diff --git a/pytket/tests/routing_test.py b/pytket/tests/routing_test.py index bfb96e3c2e..9c67d26ec3 100644 --- a/pytket/tests/routing_test.py +++ b/pytket/tests/routing_test.py @@ -15,17 +15,11 @@ from pathlib import Path from pytket.circuit import OpType, Qubit, Node, Circuit # type: ignore from pytket.routing import ( # type: ignore - NodeGraph, - Architecture, - LinePlacement, - GraphPlacement, - NoiseAwarePlacement, - Placement, - SquareGrid, - FullyConnected, - place_with_map, route, ) +from pytket.placement import LinePlacement, GraphPlacement, NoiseAwarePlacement, Placement, place_with_map # type: ignore +from pytket.architecture import Architecture, SquareGrid, FullyConnected # type: ignore +from pytket.mapping import LexiRouteRoutingMethod # type: ignore from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore from pytket.passes import ( # type: ignore DefaultMappingPass, @@ -101,13 +95,10 @@ def test_fully_connected() -> None: def test_arch_types() -> None: arch = Architecture([(0, 1)]) assert isinstance(arch, Architecture) - assert isinstance(arch, NodeGraph) fc = FullyConnected(2) assert isinstance(fc, FullyConnected) - assert isinstance(fc, NodeGraph) sg = SquareGrid(2, 2, 2) assert isinstance(sg, SquareGrid) - assert isinstance(sg, NodeGraph) def test_placements() -> None: @@ -463,19 +454,15 @@ def test_RoutingPass() -> None: cu_1 = CompilationUnit(circ) placer = GraphPlacement(arc) p_pass = PlacementPass(placer) - r_pass_0 = RoutingPass(arc, swap_lookahead=10, bridge_interactions=10) - r_pass_1 = RoutingPass(arc, swap_lookahead=10, bridge_interactions=0) + r_pass_0 = RoutingPass(arc) + r_pass_1 = RoutingPass(arc) p_pass.apply(cu_0) p_pass.apply(cu_1) r_pass_0.apply(cu_0) r_pass_1.apply(cu_1) out_circ_0 = cu_0.circuit out_circ_1 = cu_1.circuit - # TODO Should we expect BRIDGE gates in out_circ_0? If not, replace with an example - # where we would. See See https://github.com/CQCL-DEV/tket/pull/747. 
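The test updates around this point replace the old swap_lookahead / bridge_interactions keyword arguments with a config list of RoutingMethod objects, where LexiRouteRoutingMethod's constructor argument is its lookahead depth. A sketch of the updated pass construction, using only names that appear in the changed tests; the claim that a bare RoutingPass(arc) falls back to a default LexiRouteRoutingMethod is an assumption based on gen_default_mapping_pass below.

from pytket import Circuit
from pytket.architecture import Architecture
from pytket.placement import GraphPlacement
from pytket.mapping import LexiRouteRoutingMethod
from pytket.passes import FullMappingPass, PlacementPass, RoutingPass
from pytket.predicates import CompilationUnit

arc = Architecture([(0, 1), (1, 2), (2, 3)])
circ = Circuit(4).CX(0, 3).CX(1, 2).CX(0, 2)

cu = CompilationUnit(circ)
full = FullMappingPass(arc, GraphPlacement(arc), config=[LexiRouteRoutingMethod(50)])
full.apply(cu)
assert cu.circuit.valid_connectivity(arc, False, True)

cu2 = CompilationUnit(circ)
PlacementPass(GraphPlacement(arc)).apply(cu2)
RoutingPass(arc).apply(cu2)   # assumed to use a default routing config
assert cu2.circuit.valid_connectivity(arc, False, True)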
- # assert out_circ_0.n_gates_of_type(OpType.BRIDGE) == 1 assert out_circ_0.valid_connectivity(arc, False, True) - assert out_circ_1.n_gates_of_type(OpType.BRIDGE) == 0 assert out_circ_1.valid_connectivity(arc, False, True) @@ -487,15 +474,13 @@ def test_FullMappingPass() -> None: cu_1 = CompilationUnit(circ) gp_placer = GraphPlacement(arc) lp_placer = LinePlacement(arc) - m_pass_0 = FullMappingPass( - arc, gp_placer, swap_lookahead=10, bridge_interactions=10 - ) - m_pass_1 = FullMappingPass(arc, lp_placer) + + m_pass_0 = FullMappingPass(arc, gp_placer, config=[LexiRouteRoutingMethod(1)]) + m_pass_1 = FullMappingPass(arc, lp_placer, config=[LexiRouteRoutingMethod(75)]) m_pass_0.apply(cu_0) m_pass_1.apply(cu_1) out_circ_0 = cu_0.circuit out_circ_1 = cu_1.circuit - assert out_circ_0.n_gates < out_circ_1.n_gates assert out_circ_0.valid_connectivity(arc, False, True) assert out_circ_1.valid_connectivity(arc, False, True) @@ -685,7 +670,11 @@ def test_CXMappingPass() -> None: gp_placer = GraphPlacement(arc) lp_placer = LinePlacement(arc) m_pass_0 = CXMappingPass( - arc, gp_placer, swap_lookahead=10, bridge_interactions=10, directed_cx=True + arc, + gp_placer, + config=[LexiRouteRoutingMethod(20)], + bridge_interactions=10, + directed_cx=True, ) m_pass_1 = CXMappingPass(arc, lp_placer, delay_measures=False) m_pass_0.apply(cu_0) diff --git a/pytket/tests/strategies.py b/pytket/tests/strategies.py index b271eb6a5d..32c3489f54 100644 --- a/pytket/tests/strategies.py +++ b/pytket/tests/strategies.py @@ -26,7 +26,7 @@ from pytket import Circuit, Qubit, Bit from pytket._tket.circuit import BasisOrder, Node, OpType # type: ignore -from pytket._tket.routing import Architecture # type: ignore +from pytket._tket.architecture import Architecture # type: ignore from pytket.pauli import Pauli, QubitPauliString # type: ignore from pytket.utils import QubitPauliOperator from pytket.utils.results import KwargTypes diff --git a/schemas/compiler_pass_v1.json b/schemas/compiler_pass_v1.json index dd59080ebd..38fe5c57f0 100644 --- a/schemas/compiler_pass_v1.json +++ b/schemas/compiler_pass_v1.json @@ -1002,36 +1002,23 @@ "op_link_errors" ] }, - "routing_config": { + + "routingmethod" :{ "type": "object", - "description": "A configuration for the routing procedure.", + "description": "A method used during circuit mapping.", "properties": { - "depth_limit": { - "type": "integer", - "minimum": 0, - "description": "The look ahead limit for SWAP picking." - }, - "distrib_limit": { - "type": "integer", - "minimum": 0, - "description": "The look ahead limit for Distributed CX gate checking." - }, - "interactions_limit": { - "type": "integer", - "description": "The number of interactions considered in Distributed CX gate checking." + "name": { + "type": "string", + "description": "String identifying method and whether it can be serialized." }, - "distrib_exponent": { - "type": "integer", - "minimum": 0, - "description": "A factor to balance the consideration for later gates when deciding on Distributed CX gates." 
- } }, - "required": [ - "depth_limit", - "distrib_limit", - "interactions_limit", - "distrib_exponent" - ] - } + }, + "routing_config":{ + "type": "array", + "description": "A configuration for routing defined by an array of RoutingMethod.", + "items": { + "$ref": "#/definitions/routingmethod" + }, + }, } } \ No newline at end of file diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index 4e6a4ec44c..ba67512b2c 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -235,6 +235,7 @@ set(TKET_SOURCES ${TKET_MAPPING_DIR}/MappingManager.cpp ${TKET_MAPPING_DIR}/LexicographicalComparison.cpp ${TKET_MAPPING_DIR}/LexiRoute.cpp + ${TKET_MAPPING_DIR}/RoutingMethodJson.cpp # Architecture Aware Synthesis diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index c88ba22959..8dc66e1b5f 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -1,6 +1,7 @@ #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingFrontier.hpp" +#include "Utils/Json.hpp" namespace tket { @@ -20,70 +21,6 @@ LexiRoute::LexiRoute( } } -void LexiRoute::merge_with_ancilla(const UnitID& merge, const UnitID& ancilla) { - // get output and input vertices - Vertex merge_v_in = this->mapping_frontier_->circuit_.get_in(merge); - Vertex merge_v_out = this->mapping_frontier_->circuit_.get_out(merge); - Vertex ancilla_v_out = this->mapping_frontier_->circuit_.get_out(ancilla); - // find source vertex & port of merge_v_out - // output vertex, so can assume single edge - Edge merge_out_edge = - this->mapping_frontier_->circuit_.get_nth_out_edge(merge_v_in, 0); - Edge ancilla_in_edge = - this->mapping_frontier_->circuit_.get_nth_in_edge(ancilla_v_out, 0); - // Find port number - port_t merge_target_port = - this->mapping_frontier_->circuit_.get_target_port(merge_out_edge); - port_t ancilla_source_port = - this->mapping_frontier_->circuit_.get_source_port(ancilla_in_edge); - // Find vertices - Vertex merge_v_target = - this->mapping_frontier_->circuit_.target(merge_out_edge); - Vertex ancilla_v_source = - this->mapping_frontier_->circuit_.source(ancilla_in_edge); - - // remove and replace edges - this->mapping_frontier_->circuit_.remove_edge(merge_out_edge); - this->mapping_frontier_->circuit_.remove_edge(ancilla_in_edge); - this->mapping_frontier_->circuit_.add_edge( - {ancilla_v_source, ancilla_source_port}, - {merge_v_target, merge_target_port}, EdgeType::Quantum); - - // instead of manually updating all boundaries, we change which output vertex - // the qubit paths to - Edge merge_in_edge = - this->mapping_frontier_->circuit_.get_nth_in_edge(merge_v_out, 0); - port_t merge_source_port = - this->mapping_frontier_->circuit_.get_source_port(merge_in_edge); - Vertex merge_v_source = - this->mapping_frontier_->circuit_.source(merge_in_edge); - - this->mapping_frontier_->circuit_.remove_edge(merge_in_edge); - this->mapping_frontier_->circuit_.add_edge( - {merge_v_source, merge_source_port}, {ancilla_v_out, 0}, - EdgeType::Quantum); - - // remove empty vertex wire, relabel dag vertices - this->mapping_frontier_->circuit_.dag[merge_v_in].op = - get_op_ptr(OpType::noop); - this->mapping_frontier_->circuit_.dag[merge_v_out].op = - get_op_ptr(OpType::noop); - this->mapping_frontier_->circuit_.remove_vertex( - merge_v_in, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); - this->mapping_frontier_->circuit_.remove_vertex( - merge_v_out, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); - - // Can now just erase "merge" qubit from the circuit - 
this->mapping_frontier_->circuit_.boundary.get().erase(merge); - - if (this->mapping_frontier_->circuit_.unit_bimaps_.initial) { - this->mapping_frontier_->circuit_.unit_bimaps_.initial->right.erase(merge); - } - if (this->mapping_frontier_->circuit_.unit_bimaps_.final) { - this->mapping_frontier_->circuit_.unit_bimaps_.final->right.erase(merge); - } -} - bool LexiRoute::assign_at_distance( const UnitID& assignee, const Node& root, unsigned distances) { node_set_t valid_nodes; @@ -100,7 +37,7 @@ bool LexiRoute::assign_at_distance( if (this->mapping_frontier_->ancilla_nodes_.find(*it) != this->mapping_frontier_->ancilla_nodes_.end()) { // => node *it is already present in circuit, but as an ancilla - this->merge_with_ancilla(assignee, *it); + this->mapping_frontier_->merge_ancilla(assignee, *it); this->mapping_frontier_->ancilla_nodes_.erase(*it); this->labelling_.erase(*it); this->labelling_[assignee] = *it; @@ -127,7 +64,7 @@ bool LexiRoute::assign_at_distance( if (this->mapping_frontier_->ancilla_nodes_.find(preserved_node) != this->mapping_frontier_->ancilla_nodes_.end()) { // => node *it is already present in circuit, but as an ancilla - this->merge_with_ancilla(assignee, preserved_node); + this->mapping_frontier_->merge_ancilla(assignee, preserved_node); this->mapping_frontier_->ancilla_nodes_.erase(preserved_node); this->labelling_.erase(preserved_node); this->labelling_[assignee] = preserved_node; @@ -262,8 +199,10 @@ void LexiRoute::set_interacting_uids(bool assigned_only) { } } } - } else if (n_edges != 1) { - TKET_ASSERT(!"Vertex should only have 1 or 2 edges."); + } else if ( + n_edges > 2 && this->mapping_frontier_->circuit_.get_OpType_from_Vertex( + v0) != OpType::Barrier) { + TKET_ASSERT(!"Non-Barrier vertex should only have 1 or 2 edges."); } } } @@ -511,59 +450,45 @@ void LexiRoute::solve(unsigned lookahead) { } else { // only need to reset in bridge case this->set_interacting_uids(); - if (check.first) { - Node target = Node(this->interacting_uids_[chosen_swap.first]); - auto path = this->architecture_->get_path(chosen_swap.first, target); - // does path include root and target? - Node central = Node(path[1]); - this->mapping_frontier_->add_bridge(chosen_swap.first, central, target); - } - if (check.second) { - Node target = Node(this->interacting_uids_[chosen_swap.second]); - auto path = this->architecture_->get_path(chosen_swap.second, target); - // does path include root and target? 
+ + auto add_ordered_bridge = [&](const Node& n) { + auto it0 = this->mapping_frontier_->quantum_boundary->find(n); + // this should implicitly be the case if this logic is reached + TKET_ASSERT(it0 != this->mapping_frontier_->quantum_boundary->end()); + + Node other_node = Node(this->interacting_uids_[n]); + auto it1 = this->mapping_frontier_->quantum_boundary->find(other_node); + // this should implicitly be the case if this logic is reached + TKET_ASSERT(it1 != this->mapping_frontier_->quantum_boundary->end()); + + auto path = this->architecture_->get_path(n, other_node); Node central = Node(path[1]); - this->mapping_frontier_->add_bridge(chosen_swap.second, central, target); - } - } - // TODO: Refactor the following to happen during add_swap and add_bridge - // methods - // add ancilla qubits if necessary - if (copy.size() < this->mapping_frontier_->quantum_boundary->size()) { - // implies ancilla qubit is added - // find ancilla qubit, find swap vertex and port by looking at boundary, - // store in ancillas type - for (auto it = - this->mapping_frontier_->quantum_boundary->get().begin(); - it != this->mapping_frontier_->quantum_boundary->get().end(); - ++it) { - bool match = false; - for (auto jt = copy.get().begin(); jt != copy.get().end(); - ++jt) { - if (it->first == jt->first) { - match = true; - break; - } - } - if (!match) { - // extra will be added in it - // This is same condition as SWAP case, which means "Ancilla" has - // already moved to a new physical node - if (!check.first && !check.second) { - if (Node(it->first) == chosen_swap.first) { - this->mapping_frontier_->ancilla_nodes_.insert(chosen_swap.second); - } else { - this->mapping_frontier_->ancilla_nodes_.insert(chosen_swap.first); - } - } else { - this->mapping_frontier_->ancilla_nodes_.insert(Node(it->first)); - } - break; + Edge n_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + it0->second.first, it0->second.second); + Edge other_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + it1->second.first, it1->second.second); + + unsigned port0 = + this->mapping_frontier_->circuit_.get_target_port(n_edge); + unsigned port1 = + this->mapping_frontier_->circuit_.get_target_port(other_edge); + // compare port ordering to get control vs target + TKET_ASSERT(port0 != port1); + if (port0 < port1) { + this->mapping_frontier_->add_bridge(n, central, other_node); + } else { + this->mapping_frontier_->add_bridge(other_node, central, n); } + }; + + if (check.first) { + add_ordered_bridge(chosen_swap.first); + } + if (check.second) { + add_ordered_bridge(chosen_swap.second); } } - return; } @@ -584,4 +509,20 @@ unit_map_t LexiRouteRoutingMethod::routing_method( return {}; } +unsigned LexiRouteRoutingMethod::get_max_depth() const { + return this->max_depth_; +} + +nlohmann::json LexiRouteRoutingMethod::serialize() const { + nlohmann::json j; + j["depth"] = this->get_max_depth(); + j["name"] = "LexiRouteRoutingMethod"; + return j; +} + +LexiRouteRoutingMethod LexiRouteRoutingMethod::deserialize( + const nlohmann::json& j) { + return LexiRouteRoutingMethod(j.at("depth").get()); +} + } // namespace tket diff --git a/tket/src/Mapping/LexiRoute.hpp b/tket/src/Mapping/LexiRoute.hpp index ed0393f347..ce37090a5e 100644 --- a/tket/src/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/LexiRoute.hpp @@ -4,6 +4,7 @@ #include "Mapping/LexicographicalComparison.hpp" #include "Mapping/MappingFrontier.hpp" #include "Mapping/RoutingMethod.hpp" +#include "Mapping/RoutingMethodJson.hpp" namespace tket { @@ -59,16 +60,6 @@ class 
LexiRoute { */ void set_interacting_uids(bool assigned_only = false); - /** - * Merges the qubit paths of "merge" and "ancilla" in mapping frontier circuit - * such that the output of the final ancilla vertex leads into the input of - * the first merge vertex. - * - * @param merge UnitID to which ancilla path is prepended - * @param ancilla UnitID of ancilla opeartions - */ - void merge_with_ancilla(const UnitID& merge, const UnitID& ancilla); - /** * If there is some "free" Node in Architecture at distance "distances" on * the connectivity graph, assign (relable) UnitID assignee to it. "free" @@ -163,14 +154,14 @@ class LexiRouteRoutingMethod : public RoutingMethod { * * @param _max_depth Number of layers of gates checked inr outed subcircuit. */ - LexiRouteRoutingMethod(unsigned _max_depth); + LexiRouteRoutingMethod(unsigned _max_depth = 10); /** * @return true if method can route subcircuit, false if not */ bool check_method( const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const; + const ArchitecturePtr& /*architecture*/) const override; /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for @@ -181,12 +172,23 @@ class LexiRouteRoutingMethod : public RoutingMethod { */ unit_map_t routing_method( std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const; + const ArchitecturePtr& architecture) const override; + + /** + * @return Max depth used in lookahead + */ + unsigned get_max_depth() const; + + nlohmann::json serialize() const override; + + static LexiRouteRoutingMethod deserialize(const nlohmann::json& j); private: unsigned max_depth_; }; +JSON_DECL(LexiRouteRoutingMethod) + } // namespace tket #endif \ No newline at end of file diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 919482efa3..813fc2d07d 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -91,8 +91,9 @@ void MappingFrontier::advance_next_2qb_slice(unsigned max_advance) { immediate_cut_vertices_v.begin(), immediate_cut_vertices_v.end(), target_v) != immediate_cut_vertices_v.end(); - if ((!in_slice && in_edges.size() > 1) || - this->circuit_.get_OpType_from_Vertex(target_v) == OpType::Output) { + if (((!in_slice && in_edges.size() > 1) || + this->circuit_.get_OpType_from_Vertex(target_v) == OpType::Output) && + this->circuit_.get_OpType_from_Vertex(target_v) != OpType::Barrier) { // Vertex either not allowed to pass, or is output vertex => update // nothing next_frontier->insert({pair.first, pair.second}); @@ -187,8 +188,6 @@ void MappingFrontier::advance_frontier_boundary( std::vector uids; for (const Edge& e : this->circuit_.get_in_edges_of_type(vert, EdgeType::Quantum)) { - // TODO: look at key_extractor in boost instead of this helper - // method... 
uids.push_back(get_unitid_from_unit_frontier( this->quantum_boundary, {this->circuit_.source(e), this->circuit_.get_source_port(e)})); @@ -203,7 +202,8 @@ void MappingFrontier::advance_frontier_boundary( } if (architecture->valid_operation( /* this->circuit_.get_OpType_from_Vertex(vert), */ - nodes)) { + nodes) || + this->circuit_.get_OpType_from_Vertex(vert) == OpType::Barrier) { // if no valid operation, boundary not updated and while loop terminates boundary_updated = true; for (const UnitID& uid : uids) { @@ -293,6 +293,7 @@ void MappingFrontier::update_quantum_boundary_uids( } // TODO: expects every qubit is present in permutation, even if unmoved +// TODO: should this also permute final map compared to initial map void MappingFrontier::permute_subcircuit_q_out_hole( const unit_map_t& final_permutation, Subcircuit& subcircuit) { EdgeVec new_q_out_hole; @@ -354,16 +355,16 @@ void MappingFrontier::set_quantum_boundary( } } -/** - * add_qubit - * Adds given UnitID as a qubit to held circuit. - * Updates boundary. - */ -void MappingFrontier::add_qubit(const UnitID& uid) { - Qubit qb(uid); - this->circuit_.add_qubit(qb); - this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); -} +// /** +// * add_qubit +// * Adds given UnitID as a qubit to held circuit. +// * Updates boundary. +// */ +// void MappingFrontier::add_qubit(const UnitID& uid) { +// Qubit qb(uid); +// this->circuit_.add_qubit(qb); +// this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); +// } /** * add_swap @@ -377,16 +378,20 @@ void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { auto uid1_in_it = this->quantum_boundary->find(uid_1); // Add Qubit if not in MappingFrontier boundary (i.e. not in circuit) + // If it so happens one of these is an ancilla, it works this out later... + // TODO: make it do that checking here ^^^^ + // implies that it is a new "ancilla" qubit if (uid0_in_it == this->quantum_boundary->end()) { - this->add_qubit(uid_0); + this->add_ancilla(uid_0); uid0_in_it = this->quantum_boundary->find(uid_0); } if (uid1_in_it == this->quantum_boundary->end()) { - this->add_qubit(uid_1); + this->add_ancilla(uid_1); uid1_in_it = this->quantum_boundary->find(uid_1); } // update held ancillas + // the location/id of the "ancilla node" changes when a SWAP occurs Node n0 = Node(uid_0); Node n1 = Node(uid_1); @@ -443,6 +448,9 @@ void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { this->circuit_.boundary.get().insert({uid_0, uid0_in, uid1_out}); this->circuit_.boundary.get().insert({uid_1, uid1_in, uid0_out}); + + std::map final_map = {{n0, n1}, {n1, n0}}; + this->circuit_.update_final_map(final_map); } void MappingFrontier::add_bridge( @@ -456,7 +464,7 @@ void MappingFrontier::add_bridge( // However, distances used to check BRIDGE and find PATH may use // central qubit that is unallocated, in which add it. 
if (central_in_it == this->quantum_boundary->end()) { - this->add_qubit(central); + this->add_ancilla(central); central_in_it = this->quantum_boundary->find(central); } @@ -482,4 +490,71 @@ void MappingFrontier::add_bridge( cx_v, Circuit::GraphRewiring::Yes, Circuit::VertexDeletion::Yes); } +void MappingFrontier::add_ancilla(const UnitID& ancilla) { + Qubit qb(ancilla); + this->circuit_.add_qubit(qb); + this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); + this->ancilla_nodes_.insert(Node(ancilla)); + UnitID uid_ancilla(ancilla); + + unit_map_t update_map; + update_map.insert({uid_ancilla, uid_ancilla}); + this->circuit_.update_initial_map(update_map); + this->circuit_.update_final_map(update_map); +} + +void MappingFrontier::merge_ancilla( + const UnitID& merge, const UnitID& ancilla) { + // get output and input vertices + Vertex merge_v_in = this->circuit_.get_in(merge); + Vertex merge_v_out = this->circuit_.get_out(merge); + Vertex ancilla_v_out = this->circuit_.get_out(ancilla); + // find source vertex & port of merge_v_out + // output vertex, so can assume single edge + Edge merge_out_edge = this->circuit_.get_nth_out_edge(merge_v_in, 0); + Edge ancilla_in_edge = this->circuit_.get_nth_in_edge(ancilla_v_out, 0); + // Find port number + port_t merge_target_port = this->circuit_.get_target_port(merge_out_edge); + port_t ancilla_source_port = this->circuit_.get_source_port(ancilla_in_edge); + // Find vertices + Vertex merge_v_target = this->circuit_.target(merge_out_edge); + Vertex ancilla_v_source = this->circuit_.source(ancilla_in_edge); + + // remove and replace edges + this->circuit_.remove_edge(merge_out_edge); + this->circuit_.remove_edge(ancilla_in_edge); + this->circuit_.add_edge( + {ancilla_v_source, ancilla_source_port}, + {merge_v_target, merge_target_port}, EdgeType::Quantum); + + // instead of manually updating all boundaries, we change which output vertex + // the qubit paths to + Edge merge_in_edge = this->circuit_.get_nth_in_edge(merge_v_out, 0); + port_t merge_source_port = this->circuit_.get_source_port(merge_in_edge); + Vertex merge_v_source = this->circuit_.source(merge_in_edge); + + this->circuit_.remove_edge(merge_in_edge); + this->circuit_.add_edge( + {merge_v_source, merge_source_port}, {ancilla_v_out, 0}, + EdgeType::Quantum); + + // remove empty vertex wire, relabel dag vertices + this->circuit_.dag[merge_v_in].op = get_op_ptr(OpType::noop); + this->circuit_.dag[merge_v_out].op = get_op_ptr(OpType::noop); + this->circuit_.remove_vertex( + merge_v_in, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + this->circuit_.remove_vertex( + merge_v_out, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + + // Can now just erase "merge" qubit from the circuit + this->circuit_.boundary.get().erase(merge); + + if (this->circuit_.unit_bimaps_.initial) { + this->circuit_.unit_bimaps_.initial->right.erase(merge); + } + if (this->circuit_.unit_bimaps_.final) { + this->circuit_.unit_bimaps_.final->right.erase(merge); + } +} + } // namespace tket diff --git a/tket/src/Mapping/MappingFrontier.hpp b/tket/src/Mapping/MappingFrontier.hpp index eb64afb46a..f1eb102b72 100644 --- a/tket/src/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/MappingFrontier.hpp @@ -98,15 +98,6 @@ struct MappingFrontier { */ unit_map_t get_default_to_quantum_boundary_unit_map() const; - /** - * add_qubit - * Adds given UnitID as Qubit to this->circuit_. - * Updates this->quantum_boundary with new Qubit. - * - * @param uid UnitID to add. 
- */ - void add_qubit(const UnitID& uid); - /** * add_swap * Inserts an OpType::SWAP gate into the uid_0 and uid_1 edges held in @@ -129,6 +120,24 @@ struct MappingFrontier { void add_bridge( const UnitID& control, const UnitID& central, const UnitID& target); + /** + * add_ancilla + * Adds an Ancillary UnitID to Circuit and tracked information + * + * @param ancilla UnitID of added ancilla + */ + void add_ancilla(const UnitID& ancilla); + + /** + * merge_ancilla + * Rewires this->circuit_.dag such that in wire to ancilla Output vertex + * is now mapped to out wire of merge Input vertex. + * + * @param merge UnitID to which ancilla path is prepended + * @param ancilla UnitID of ancilla opeartions + */ + void merge_ancilla(const UnitID& merge, const UnitID& ancilla); + /** * Assigns the quantum_boundary_ attribute to that passed to method. * diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 4c70f9d3d6..3250345a1c 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -10,8 +10,7 @@ MappingManager::MappingManager(const ArchitecturePtr& _architecture) bool MappingManager::route_circuit( Circuit& circuit, - const std::vector>& routing_methods) - const { + const std::vector& routing_methods) const { // Assumption; Routing can not route a circuit // with more logical qubits than an Architecture has // physical qubits physically permitted @@ -58,10 +57,10 @@ bool MappingManager::route_circuit( bool valid_methods = false; for (const auto& rm : routing_methods) { // true => can use held routing method - if (rm.get().check_method(mapping_frontier, this->architecture_)) { + if (rm->check_method(mapping_frontier, this->architecture_)) { valid_methods = true; unit_map_t partial_permutation = - rm.get().routing_method(mapping_frontier, this->architecture_); + rm->routing_method(mapping_frontier, this->architecture_); if (partial_permutation.size() > 0) { std::map node_map; diff --git a/tket/src/Mapping/MappingManager.hpp b/tket/src/Mapping/MappingManager.hpp index 5715ac2d2b..1c1ba8a66f 100644 --- a/tket/src/Mapping/MappingManager.hpp +++ b/tket/src/Mapping/MappingManager.hpp @@ -39,8 +39,7 @@ class MappingManager { */ bool route_circuit( Circuit& circuit, - const std::vector>& routing_methods) - const; + const std::vector& routing_methods) const; private: ArchitecturePtr architecture_; diff --git a/tket/src/Mapping/RoutingMethod.hpp b/tket/src/Mapping/RoutingMethod.hpp index dc96acf938..ceb76e0b65 100644 --- a/tket/src/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/RoutingMethod.hpp @@ -2,6 +2,7 @@ #define _TKET_RoutingMethod_H_ #include "Mapping/MappingFrontier.hpp" +#include "Utils/Json.hpp" namespace tket { @@ -46,7 +47,14 @@ class RoutingMethod { const ArchitecturePtr& /*architecture*/) const { return {}; } + + virtual nlohmann::json serialize() const { + throw JsonError( + "JSON serialization not implemented for given RoutingMethod."); + } }; + +typedef std::shared_ptr RoutingMethodPtr; } // namespace tket #endif \ No newline at end of file diff --git a/tket/src/Mapping/RoutingMethodCircuit.cpp b/tket/src/Mapping/RoutingMethodCircuit.cpp index 1366c9e1bd..32708452c9 100644 --- a/tket/src/Mapping/RoutingMethodCircuit.cpp +++ b/tket/src/Mapping/RoutingMethodCircuit.cpp @@ -65,4 +65,5 @@ unit_map_t RoutingMethodCircuit::routing_method( // return initial unit_map_t incase swap network required return swap_permutation; } + } // namespace tket \ No newline at end of file diff --git 
a/tket/src/Mapping/RoutingMethodCircuit.hpp b/tket/src/Mapping/RoutingMethodCircuit.hpp index 3c37d0883b..a500bf1b54 100644 --- a/tket/src/Mapping/RoutingMethodCircuit.hpp +++ b/tket/src/Mapping/RoutingMethodCircuit.hpp @@ -53,6 +53,9 @@ class RoutingMethodCircuit : public RoutingMethod { check_subcircuit_; unsigned max_size_, max_depth_; }; + +JSON_DECL(RoutingMethod); + } // namespace tket #endif diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp new file mode 100644 index 0000000000..55245d45f5 --- /dev/null +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -0,0 +1,36 @@ +#include "Mapping/RoutingMethodJson.hpp" + +namespace tket { + +void to_json(nlohmann::json& j, const RoutingMethod& rm) { j = rm.serialize(); } + +void from_json(const nlohmann::json& j, RoutingMethod& rm) { + std::string name = j.at("name").get(); + if (name == "LexiRouteRoutingMethod") { + rm = LexiRouteRoutingMethod::deserialize(j); + } else { + throw JsonError( + "Deserialization not yet implemented for generic RoutingMethod " + "objects."); + } +} + +void to_json(nlohmann::json& j, const std::vector& rmp) { + for (const auto& r : rmp) { + j.push_back(*r); + } +} + +void from_json(const nlohmann::json& j, std::vector& rmp) { + for (const auto& c : j) { + std::string name = c.at("name").get(); + if (name == "LexiRouteRoutingMethod") { + rmp.push_back(std::make_shared( + LexiRouteRoutingMethod::deserialize(c))); + } else { + rmp.push_back(std::make_shared(c.get())); + } + } +} + +} // namespace tket diff --git a/tket/src/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/RoutingMethodJson.hpp new file mode 100644 index 0000000000..1c6cb7d359 --- /dev/null +++ b/tket/src/Mapping/RoutingMethodJson.hpp @@ -0,0 +1,24 @@ +#ifndef _TKET_RoutingMethodJson_H_ +#define _TKET_RoutingMethodJson_H_ + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" +#include "Utils/Json.hpp" + +namespace tket { + +void to_json(nlohmann::json& j, const RoutingMethod& rm); + +void from_json(const nlohmann::json& j, RoutingMethod& rm); + +JSON_DECL(RoutingMethod); + +void to_json(nlohmann::json& j, const std::vector& rmp); + +void from_json(const nlohmann::json& j, std::vector& rmp); + +JSON_DECL(std::vector); + +} // namespace tket + +#endif diff --git a/tket/src/Predicates/CompilerPass.cpp b/tket/src/Predicates/CompilerPass.cpp index 8bd777078a..953deb30a2 100644 --- a/tket/src/Predicates/CompilerPass.cpp +++ b/tket/src/Predicates/CompilerPass.cpp @@ -14,6 +14,7 @@ #include "CompilerPass.hpp" +#include "Mapping/RoutingMethodJson.hpp" #include "PassGenerators.hpp" #include "PassLibrary.hpp" #include "Utils/Json.hpp" @@ -429,8 +430,9 @@ void from_json(const nlohmann::json& j, PassPtr& pp) { pp = gen_euler_pass(q, p, s); } else if (passname == "RoutingPass") { Architecture arc = content.at("architecture").get(); - RoutingConfig con = content.at("routing_config").get(); + std::vector con = content.at("routing_config"); pp = gen_routing_pass(arc, con); + } else if (passname == "PlacementPass") { pp = gen_placement_pass(content.at("placement").get()); } else if (passname == "RenameQubitsPass") { @@ -484,7 +486,8 @@ void from_json(const nlohmann::json& j, PassPtr& pp) { // SEQUENCE PASS - DESERIALIZABLE ONLY Architecture arc = content.at("architecture").get(); PlacementPtr place = content.at("placement").get(); - RoutingConfig config = content.at("routing_config").get(); + std::vector config = content.at("routing_config"); + pp = gen_full_mapping_pass(arc, place, config); } else if (passname 
== "DefaultMappingPass") { // SEQUENCE PASS - DESERIALIZABLE ONLY @@ -494,7 +497,7 @@ void from_json(const nlohmann::json& j, PassPtr& pp) { // SEQUENCE PASS - DESERIALIZABLE ONLY Architecture arc = content.at("architecture").get(); PlacementPtr place = content.at("placement").get(); - RoutingConfig config = content.at("routing_config").get(); + std::vector config = content.at("routing_config"); bool directed_cx = content.at("directed").get(); bool delay_measures = content.at("delay_measures").get(); pp = gen_cx_mapping_pass(arc, place, config, directed_cx, delay_measures); diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index b0720080f3..4f0c50477d 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -18,6 +18,8 @@ #include "Circuit/CircPool.hpp" #include "Circuit/Circuit.hpp" #include "Converters/PhasePoly.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/RoutingMethod.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassLibrary.hpp" @@ -171,18 +173,20 @@ PassPtr gen_placement_pass(const PlacementPtr& placement_ptr) { PassPtr gen_full_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config) { + const std::vector& config) { return gen_placement_pass(placement_ptr) >> gen_routing_pass(arc, config); } PassPtr gen_default_mapping_pass(const Architecture& arc) { PlacementPtr pp = std::make_shared(arc); - return gen_full_mapping_pass(arc, pp); + RoutingMethodPtr rmw = std::make_shared(100); + return gen_full_mapping_pass(arc, pp, {rmw}); } PassPtr gen_cx_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config, bool directed_cx, bool delay_measures) { + const std::vector>& config, bool directed_cx, + bool delay_measures) { PassPtr rebase_pass = gen_rebase_pass( {OpType::CX}, CircPool::CX(), all_single_qubit_types(), Transform::tk1_to_tk1); @@ -195,15 +199,12 @@ PassPtr gen_cx_mapping_pass( return return_pass; } -PassPtr gen_routing_pass(const Architecture& arc, const RoutingConfig& config) { - Transform::Transformation trans = - [=](Circuit& circ) { // this doesn't work if capture by ref for some - // reason.... 
- Routing route(circ, arc); - std::pair circbool = route.solve(config); - circ = circbool.first; - return circbool.second; - }; +PassPtr gen_routing_pass( + const Architecture& arc, const std::vector& config) { + Transform::Transformation trans = [=](Circuit& circ) { + MappingManager mm(std::make_shared(arc)); + return mm.route_circuit(circ, config); + }; Transform t = Transform(trans); PredicatePtr twoqbpred = std::make_shared(); @@ -403,7 +404,7 @@ PassPtr gen_full_mapping_pass_phase_poly( } PassPtr gen_directed_cx_routing_pass( - const Architecture& arc, const RoutingConfig& config) { + const Architecture& arc, const std::vector& config) { OpTypeSet multis = {OpType::CX, OpType::BRIDGE, OpType::SWAP}; return gen_routing_pass(arc, config) >> gen_rebase_pass( diff --git a/tket/src/Predicates/PassGenerators.hpp b/tket/src/Predicates/PassGenerators.hpp index a2b5489cd4..6298d65ae3 100644 --- a/tket/src/Predicates/PassGenerators.hpp +++ b/tket/src/Predicates/PassGenerators.hpp @@ -16,6 +16,9 @@ #include "ArchAwareSynth/SteinerForest.hpp" #include "CompilerPass.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" + namespace tket { /* a wrapper method for the rebase_factory in Transforms */ @@ -42,18 +45,19 @@ PassPtr gen_rename_qubits_pass(const std::map& qm); PassPtr gen_placement_pass(const PlacementPtr& placement_ptr); /* This higher order function generates a Routing pass using the -RoutingConfig object */ +std::vector object */ PassPtr gen_full_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config = {}); + const std::vector& config); PassPtr gen_default_mapping_pass(const Architecture& arc); PassPtr gen_cx_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config, bool directed_cx, bool delay_measures); + const std::vector& config, bool directed_cx, + bool delay_measures); PassPtr gen_routing_pass( - const Architecture& arc, const RoutingConfig& config = {}); + const Architecture& arc, const std::vector& config); PassPtr gen_directed_cx_routing_pass( - const Architecture& arc, const RoutingConfig& config = {}); + const Architecture& arc, const std::vector& config); /** * execute architecture aware synthesis on a given architecture for an allready diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index ca55f96f03..106d581854 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
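gen_routing_pass above now delegates to MappingManager::route_circuit with a vector of RoutingMethodPtr. The same interface is exposed to Python via pytket.mapping (as imported in mapping_test.py); a short sketch under the assumption that the Python binding mirrors the C++ signature, mutating the circuit in place and returning a bool.

from pytket import Circuit
from pytket.architecture import Architecture
from pytket.mapping import MappingManager, LexiRouteRoutingMethod

arc = Architecture([(0, 1), (1, 2), (2, 3)])
circ = Circuit(4).CX(0, 3).CX(1, 2)

mm = MappingManager(arc)
mm.route_circuit(circ, [LexiRouteRoutingMethod(50)])  # inserts SWAP/BRIDGE gates as needed
assert circ.valid_connectivity(arc, False, True)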
+ #include "ArchitectureMapping.hpp" #include @@ -29,6 +43,52 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) } } +ArchitectureMapping::ArchitectureMapping( + const Architecture& arch, + const std::vector>& edges) + : m_arch(arch) { + auto& node_to_vertex_mapping = m_node_to_vertex_mapping; + auto& vertex_to_node_mapping = m_vertex_to_node_mapping; + + const auto add_node = [&node_to_vertex_mapping, + &vertex_to_node_mapping](unsigned nn) { + const Node node(nn); + if (node_to_vertex_mapping.count(node) == 0) { + node_to_vertex_mapping[node] = vertex_to_node_mapping.size(); + vertex_to_node_mapping.push_back(node); + } + }; + + // The nodes are labelled 0,1,2,... in order of appearance. + // Nothing special about this ordering, just for backwards compatibility. + for (const auto& entry : edges) { + add_node(entry.first); + add_node(entry.second); + } + + // Check that the nodes agree with the architecture object. + const auto uids = arch.nodes(); + if (uids.size() != m_vertex_to_node_mapping.size()) { + std::stringstream ss; + ss << "ArchitectureMapping: passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has " << uids.size() + << " vertices"; + throw std::runtime_error(ss.str()); + } + for (const UnitID& uid : uids) { + const Node node(uid); + if (m_node_to_vertex_mapping.count(node) == 0) { + std::stringstream ss; + ss << "ArchitectureMapping: passed in " << edges.size() + << " edges, giving " << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has an unknown node " + << node.repr(); + throw std::runtime_error(ss.str()); + } + } +} + size_t ArchitectureMapping::number_of_vertices() const { return m_vertex_to_node_mapping.size(); } diff --git a/tket/src/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwapping/ArchitectureMapping.hpp index d3710fb09c..37d15f7f67 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.hpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_ArchitectureMapping_H_ -#define _TKET_TokenSwapping_ArchitectureMapping_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Architecture/Architecture.hpp" #include "TSAUtils/SwapFunctions.hpp" @@ -30,6 +43,25 @@ class ArchitectureMapping { */ explicit ArchitectureMapping(const Architecture& arch); + /** If the architecture object was initialised with explicit edges, + * use these edges (rather than the Architecture nodes() function) + * to create the Node <-> size_t mapping, in a fixed way not dependent + * on Architecture (the reason being that Architecture does not guarantee + * the mapping, but if we change the labels then we change to an isomorphic + * but different token swapping problem, which messes up testing. 
+ * In practice every implementation of token swapping, except for the ultimate + * probably exponential-time optimal algorithm, is going to depend + * on the labels. Even if we had a fast graph isomorphism routine, the labels + * would still not be uniquely determined, as they could be permuted). + * @param arch The finished Architecture object, must remain valid + * for the lifetime of this object. + * @param edges Edges originally used to construct the Architecture object. + * These will uniquely determine the internal Node <-> size_t mapping. + */ + ArchitectureMapping( + const Architecture& arch, + const std::vector>& edges); + /** Convenient reference to the Architecture object we used * to construct this ArchitectureMapping. */ @@ -75,4 +107,3 @@ class ArchitectureMapping { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp index a70a20fc6a..517cd48d19 100644 --- a/tket/src/TokenSwapping/BestFullTsa.cpp +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "BestFullTsa.hpp" #include "DistancesFromArchitecture.hpp" diff --git a/tket/src/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/BestFullTsa.hpp index 2ed5baf555..d474fb898d 100644 --- a/tket/src/TokenSwapping/BestFullTsa.hpp +++ b/tket/src/TokenSwapping/BestFullTsa.hpp @@ -1,5 +1,19 @@ -#ifndef _TKET_TokenSwapping_BestFullTsa_H_ -#define _TKET_TokenSwapping_BestFullTsa_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + #include "ArchitectureMapping.hpp" #include "HybridTsa00.hpp" #include "RNG.hpp" @@ -65,4 +79,3 @@ class BestFullTsa : public PartialTsaInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp index d430527cf5..338d07c17b 100644 --- a/tket/src/TokenSwapping/CyclesCandidateManager.cpp +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "CyclesCandidateManager.hpp" #include diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.hpp b/tket/src/TokenSwapping/CyclesCandidateManager.hpp index 769d70ce68..36cb7b84fd 100644 --- a/tket/src/TokenSwapping/CyclesCandidateManager.hpp +++ b/tket/src/TokenSwapping/CyclesCandidateManager.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_CyclesCandidateManager_H_ -#define _TKET_TokenSwapping_CyclesCandidateManager_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include @@ -177,4 +190,3 @@ class CyclesCandidateManager { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp index 6770c871af..bfb0f94cd8 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.cpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "CyclesGrowthManager.hpp" #include diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.hpp b/tket/src/TokenSwapping/CyclesGrowthManager.hpp index 670b19da29..87f2bac7cb 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.hpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_CyclesGrowthManager_H_ -#define _TKET_TokenSwapping_CyclesGrowthManager_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "DistancesInterface.hpp" #include "NeighboursInterface.hpp" @@ -219,4 +232,3 @@ class CyclesGrowthManager { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.cpp b/tket/src/TokenSwapping/CyclesPartialTsa.cpp index 04326442e8..914bc8dd63 100644 --- a/tket/src/TokenSwapping/CyclesPartialTsa.cpp +++ b/tket/src/TokenSwapping/CyclesPartialTsa.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "CyclesPartialTsa.hpp" #include "Utils/Assert.hpp" diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.hpp b/tket/src/TokenSwapping/CyclesPartialTsa.hpp index 7f8dfe7421..c393270694 100644 --- a/tket/src/TokenSwapping/CyclesPartialTsa.hpp +++ b/tket/src/TokenSwapping/CyclesPartialTsa.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_CyclesPartialTsa_H_ -#define _TKET_TokenSwapping_CyclesPartialTsa_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "CyclesCandidateManager.hpp" #include "PartialTsaInterface.hpp" @@ -102,4 +115,3 @@ class CyclesPartialTsa : public PartialTsaInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp index 6de62fe698..f5fe4a0050 100644 --- a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp @@ -1,8 +1,21 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "CyclicShiftCostEstimate.hpp" #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp index 031d6901d8..647839e9ac 100644 --- a/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_CyclicShiftCostEstimate_H_ -#define _TKET_TokenSwapping_CyclicShiftCostEstimate_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include @@ -61,4 +74,3 @@ struct CyclicShiftCostEstimate { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index c0b4f597e4..ad8039edcb 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "DistancesFromArchitecture.hpp" #include diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.hpp b/tket/src/TokenSwapping/DistancesFromArchitecture.hpp index a6c2c33ed4..f6d6ea12e1 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.hpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_DistancesFromArchitecture_H_ -#define _TKET_TokenSwapping_DistancesFromArchitecture_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "ArchitectureMapping.hpp" #include "DistancesInterface.hpp" @@ -73,4 +86,3 @@ class DistancesFromArchitecture : public DistancesInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/DistancesInterface.cpp b/tket/src/TokenSwapping/DistancesInterface.cpp index ab19564597..35363c7505 100644 --- a/tket/src/TokenSwapping/DistancesInterface.cpp +++ b/tket/src/TokenSwapping/DistancesInterface.cpp @@ -1,6 +1,19 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "DistancesInterface.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/DistancesInterface.hpp b/tket/src/TokenSwapping/DistancesInterface.hpp index efff6c7717..fa3488ba18 100644 --- a/tket/src/TokenSwapping/DistancesInterface.hpp +++ b/tket/src/TokenSwapping/DistancesInterface.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_DistancesInterface_H_ -#define _TKET_TokenSwapping_DistancesInterface_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -53,4 +66,3 @@ class DistancesInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.cpp b/tket/src/TokenSwapping/DynamicTokenTracker.cpp index bf79857095..fe0e1dc234 100644 --- a/tket/src/TokenSwapping/DynamicTokenTracker.cpp +++ b/tket/src/TokenSwapping/DynamicTokenTracker.cpp @@ -1,6 +1,18 @@ -#include "DynamicTokenTracker.hpp" +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
-; +#include "DynamicTokenTracker.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.hpp b/tket/src/TokenSwapping/DynamicTokenTracker.hpp index b53386ad99..a230e77869 100644 --- a/tket/src/TokenSwapping/DynamicTokenTracker.hpp +++ b/tket/src/TokenSwapping/DynamicTokenTracker.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_DynamicTokenTracker_H_ -#define _TKET_TokenSwapping_DynamicTokenTracker_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "TSAUtils/VertexMappingFunctions.hpp" @@ -79,4 +92,3 @@ class DynamicTokenTracker { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/HybridTsa00.cpp b/tket/src/TokenSwapping/HybridTsa00.cpp index 45992b8367..f897c3e047 100644 --- a/tket/src/TokenSwapping/HybridTsa00.cpp +++ b/tket/src/TokenSwapping/HybridTsa00.cpp @@ -1,9 +1,22 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "HybridTsa00.hpp" #include "TSAUtils/DistanceFunctions.hpp" #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/HybridTsa00.hpp b/tket/src/TokenSwapping/HybridTsa00.hpp index 11b911bc26..3c5d86d9b4 100644 --- a/tket/src/TokenSwapping/HybridTsa00.hpp +++ b/tket/src/TokenSwapping/HybridTsa00.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_HybridTsa00_H_ -#define _TKET_TokenSwapping_HybridTsa00_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "CyclesPartialTsa.hpp" #include "TrivialTSA.hpp" @@ -48,4 +61,3 @@ class HybridTsa00 : public PartialTsaInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index 914171e387..b8ed27bb68 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -1,11 +1,23 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "NeighboursFromArchitecture.hpp" #include #include #include -; - namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp index 7cf6b16550..e32b531ebc 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_NeighboursFromArchitecture_H_ -#define _TKET_TokenSwapping_NeighboursFromArchitecture_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "ArchitectureMapping.hpp" #include "NeighboursInterface.hpp" @@ -36,4 +49,3 @@ class NeighboursFromArchitecture : public NeighboursInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/NeighboursInterface.cpp b/tket/src/TokenSwapping/NeighboursInterface.cpp index ad91f163c8..7a5773e33c 100644 --- a/tket/src/TokenSwapping/NeighboursInterface.cpp +++ b/tket/src/TokenSwapping/NeighboursInterface.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "NeighboursInterface.hpp" #include "Utils/Exceptions.hpp" diff --git a/tket/src/TokenSwapping/NeighboursInterface.hpp b/tket/src/TokenSwapping/NeighboursInterface.hpp index 68fb2335ed..371552a37d 100644 --- a/tket/src/TokenSwapping/NeighboursInterface.hpp +++ b/tket/src/TokenSwapping/NeighboursInterface.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_NeighboursInterface_H_ -#define _TKET_TokenSwapping_NeighboursInterface_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -31,4 +44,3 @@ class NeighboursInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/PartialTsaInterface.cpp b/tket/src/TokenSwapping/PartialTsaInterface.cpp index 90a3320212..afac5357dd 100644 --- a/tket/src/TokenSwapping/PartialTsaInterface.cpp +++ b/tket/src/TokenSwapping/PartialTsaInterface.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "PartialTsaInterface.hpp" #include "Utils/Exceptions.hpp" diff --git a/tket/src/TokenSwapping/PartialTsaInterface.hpp b/tket/src/TokenSwapping/PartialTsaInterface.hpp index 61abddb57f..82e6cfc03a 100644 --- a/tket/src/TokenSwapping/PartialTsaInterface.hpp +++ b/tket/src/TokenSwapping/PartialTsaInterface.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_PartialTsaInterface_H_ -#define _TKET_TokenSwapping_PartialTsaInterface_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "DistancesInterface.hpp" #include "NeighboursInterface.hpp" @@ -53,4 +66,3 @@ class PartialTsaInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/PathFinderInterface.cpp b/tket/src/TokenSwapping/PathFinderInterface.cpp index ac647f1f38..c9f95ad134 100644 --- a/tket/src/TokenSwapping/PathFinderInterface.cpp +++ b/tket/src/TokenSwapping/PathFinderInterface.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "PathFinderInterface.hpp" #include "Utils/Exceptions.hpp" diff --git a/tket/src/TokenSwapping/PathFinderInterface.hpp b/tket/src/TokenSwapping/PathFinderInterface.hpp index b0a3200642..a6548f9fd4 100644 --- a/tket/src/TokenSwapping/PathFinderInterface.hpp +++ b/tket/src/TokenSwapping/PathFinderInterface.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_PathFinderInterface_H_ -#define _TKET_TokenSwapping_PathFinderInterface_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -71,4 +84,3 @@ class PathFinderInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/RNG.hpp b/tket/src/TokenSwapping/RNG.hpp index e2fde99a2f..ac6fc7c73a 100644 --- a/tket/src/TokenSwapping/RNG.hpp +++ b/tket/src/TokenSwapping/RNG.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_RNG_H_ -#define _TKET_TokenSwapping_RNG_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include #include @@ -172,4 +185,3 @@ class RNG { }; } // namespace tket -#endif \ No newline at end of file diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index 8ada732b11..af470ef7af 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "RiverFlowPathFinder.hpp" #include @@ -6,7 +20,6 @@ #include "TSAUtils/SwapFunctions.hpp" #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.hpp b/tket/src/TokenSwapping/RiverFlowPathFinder.hpp index 901ae73310..23546388df 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.hpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_RiverFlowPathFinder_H_ -#define _TKET_TokenSwapping_RiverFlowPathFinder_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -83,4 +96,3 @@ class RiverFlowPathFinder : public PathFinderInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp index 3b8c24ab0f..28c4e29fcb 100644 --- a/tket/src/TokenSwapping/SwapListOptimiser.cpp +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "SwapListOptimiser.hpp" #include "TSAUtils/VertexSwapResult.hpp" diff --git a/tket/src/TokenSwapping/SwapListOptimiser.hpp b/tket/src/TokenSwapping/SwapListOptimiser.hpp index d01ad080d5..c3ce30d5ff 100644 --- a/tket/src/TokenSwapping/SwapListOptimiser.hpp +++ b/tket/src/TokenSwapping/SwapListOptimiser.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_SwapListOptimiser_H_ -#define _TKET_TokenSwapping_SwapListOptimiser_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "DynamicTokenTracker.hpp" @@ -146,4 +159,3 @@ class SwapListOptimiser { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp index 390911aa84..1f50673ec8 100644 --- a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "DebugFunctions.hpp" #include diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp index 0d8c62bcb7..a2fa9f1625 100644 --- a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TSAUtils_DebugFunctions_H_ -#define _TKET_TokenSwapping_TSAUtils_DebugFunctions_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include @@ -31,4 +44,3 @@ std::string str(const std::vector& swaps); } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp index bd9981248e..16f28fc4f0 100644 --- a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp @@ -1,10 +1,22 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "DistanceFunctions.hpp" #include #include -; - namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp index 4ab6123c4f..cd077dee48 100644 --- a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TSAUtils_DistanceFunctions_H_ -#define _TKET_TokenSwapping_TSAUtils_DistanceFunctions_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -73,4 +86,3 @@ size_t get_swaps_lower_bound( } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp index 6eef0b7712..243fe46de7 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -1,10 +1,22 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "GeneralFunctions.hpp" #include #include -; - namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp index 165b085c29..bcf9162590 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TSAUtils_GeneralFunctions_H_ -#define _TKET_TokenSwapping_TSAUtils_GeneralFunctions_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once // This is for "leftover" functions not specifically linked to token swapping // which are candidates for being used and moved elsewhere, @@ -84,4 +97,3 @@ std::set get_random_set( } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp index e5c38aabaa..7c2d9dbfe5 100644 --- a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp @@ -1,10 +1,22 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "SwapFunctions.hpp" #include #include -; - namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp index 3437c38fe5..e259c051ec 100644 --- a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TSAUtils_SwapFunctions_H_ -#define _TKET_TokenSwapping_TSAUtils_SwapFunctions_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include #include @@ -33,4 +46,3 @@ bool disjoint(const Swap& swap1, const Swap& swap2); } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index b817124aba..9ac8ba8a09 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "VertexMappingFunctions.hpp" #include @@ -6,8 +20,6 @@ #include "../../Utils/Assert.hpp" #include "VertexSwapResult.hpp" -; - namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp index 00c490163a..752040e3b5 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TSAUtils_VertexMappingFunctions_H_ -#define _TKET_TokenSwapping_TSAUtils_VertexMappingFunctions_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -71,4 +84,3 @@ void add_swap(VertexMapping& source_to_target_map, const Swap& swap); } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp index ec3d345135..0a9414e6d2 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp @@ -1,6 +1,18 @@ -#include "VertexSwapResult.hpp" +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
-; +#include "VertexSwapResult.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp index 9a222cfa35..441678c4d4 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TSAUtils_VertexSwapResult_H_ -#define _TKET_TokenSwapping_TSAUtils_VertexSwapResult_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -44,4 +57,3 @@ struct VertexSwapResult { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp index 292528cc90..f4733fd6a0 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "CanonicalRelabelling.hpp" #include diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp index c51c5a7db4..ede7f32ef3 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TableLookup_CanonicalRelabelling_H_ -#define _TKET_TokenSwapping_TableLookup_CanonicalRelabelling_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "../TSAUtils/VertexMappingFunctions.hpp" namespace tket { @@ -94,4 +107,3 @@ class CanonicalRelabelling { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp index 1b48856076..b451f9dba8 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "ExactMappingLookup.hpp" #include diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp index 8ac9b84bef..dc3b1b8e72 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp @@ -1,6 +1,18 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -#ifndef _TKET_TokenSwapping_TableLookup_ExactMappingLookup_H_ -#define _TKET_TokenSwapping_TableLookup_ExactMappingLookup_H_ +#pragma once #include "CanonicalRelabelling.hpp" @@ -66,4 +78,3 @@ class ExactMappingLookup { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp index 305bb95886..0e98536471 100644 --- a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "FilteredSwapSequences.hpp" #include diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp index bc8ff6addc..72cea46c39 100644 --- a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TableLookup_FilteredSwapSequences_H_ -#define _TKET_TokenSwapping_TableLookup_FilteredSwapSequences_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -123,4 +136,3 @@ class FilteredSwapSequences { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp index f4e86f8aa2..b4aa171cfc 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -1,10 +1,23 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "PartialMappingLookup.hpp" #include #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp index 0ff0f3f8c6..deaa7dda72 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp @@ -1,6 +1,19 @@ -#ifndef _TKET_TokenSwapping_TableLookup_PartialMappingLookup_H_ -#define _TKET_TokenSwapping_TableLookup_PartialMappingLookup_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include #include @@ -61,4 +74,3 @@ class PartialMappingLookup { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp index 382ae65f4e..25c7ce3f09 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -1,8 +1,21 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "SwapConversion.hpp" #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp index 38b8424821..b2d73b6d8e 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TableLookup_SwapConversion_H_ -#define _TKET_TokenSwapping_TableLookup_SwapConversion_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include @@ -86,4 +99,3 @@ struct SwapConversion { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index 197def52d0..0c07183d08 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "SwapListSegmentOptimiser.hpp" #include @@ -6,7 +20,6 @@ #include "../../Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp index 41d9e606b9..006273807e 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp @@ -1,6 +1,19 @@ -#ifndef _TKET_TokenSwapping_TableLookup_SwapListSegmentOptimiser_H_ -#define _TKET_TokenSwapping_TableLookup_SwapListSegmentOptimiser_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -82,4 +95,3 @@ class SwapListSegmentOptimiser { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp index 5b1de153f9..c3cf69c41a 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "SwapListTableOptimiser.hpp" #include @@ -7,8 +21,6 @@ #include "../TSAUtils/DebugFunctions.hpp" #include "Utils/Assert.hpp" -; - namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp index 0ec2dae717..3d97025bac 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp @@ -1,6 +1,19 @@ -#ifndef _TKET_TokenSwapping_TableLookup_SwapListTableOptimiser_H_ -#define _TKET_TokenSwapping_TableLookup_SwapListTableOptimiser_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include @@ -76,4 +89,3 @@ class SwapListTableOptimiser { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp index 2cbbf21af2..ad1c412983 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "SwapSequenceTable.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp index b80c5f1be1..6ff7237f9a 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TableLookup_SwapSequenceTable_H_ -#define _TKET_TokenSwapping_TableLookup_SwapSequenceTable_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -102,4 +115,3 @@ struct SwapSequenceTable { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp index 5c1e83e7f4..a2b7367f8b 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "VertexMapResizing.hpp" #include @@ -5,7 +19,6 @@ #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp index a049bb13c1..aafeb49f20 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TableLookup_VertexMapResizing_H_ -#define _TKET_TokenSwapping_TableLookup_VertexMapResizing_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -105,4 +118,3 @@ class VertexMapResizing : public NeighboursInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp index 3e1006417d..372e4e8528 100644 --- a/tket/src/TokenSwapping/TrivialTSA.cpp +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "TrivialTSA.hpp" #include @@ -10,7 +24,6 @@ #include "TSAUtils/VertexSwapResult.hpp" #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/TrivialTSA.hpp b/tket/src/TokenSwapping/TrivialTSA.hpp index 2b054d1f43..7260e5c9f0 100644 --- a/tket/src/TokenSwapping/TrivialTSA.hpp +++ b/tket/src/TokenSwapping/TrivialTSA.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_TrivialTSA_H_ -#define _TKET_TokenSwapping_TrivialTSA_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include @@ -206,4 +219,3 @@ class TrivialTSA : public PartialTsaInterface { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/VectorListHybrid.hpp index 0e88ecec21..3b3f4a9bce 100644 --- a/tket/src/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/VectorListHybrid.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_VectorListHybrid_H_ -#define _TKET_TokenSwapping_VectorListHybrid_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -519,4 +532,3 @@ std::string VectorListHybrid::debug_str() const { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index 05c832848e..8fa6495f79 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "VectorListHybridSkeleton.hpp" #include diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp index 1883a09202..2bf9649ca7 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_VectorListHybridSkeleton_H_ -#define _TKET_TokenSwapping_VectorListHybridSkeleton_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include #include @@ -150,4 +163,3 @@ class VectorListHybridSkeleton { } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp index 9b075e26c4..f315b14aeb 100644 --- a/tket/src/TokenSwapping/main_entry_functions.cpp +++ b/tket/src/TokenSwapping/main_entry_functions.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "main_entry_functions.hpp" #include diff --git a/tket/src/TokenSwapping/main_entry_functions.hpp b/tket/src/TokenSwapping/main_entry_functions.hpp index 2dbfcd57fb..14a1c7d4c6 100644 --- a/tket/src/TokenSwapping/main_entry_functions.hpp +++ b/tket/src/TokenSwapping/main_entry_functions.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TokenSwapping_main_entry_functions_H_ -#define _TKET_TokenSwapping_main_entry_functions_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -49,4 +62,3 @@ std::tuple get_swaps( const unit_map_t& desired_logical_to_physical_map); } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp index e7882f53e2..e1f06aee30 100644 --- a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "FixedCompleteSolutions.hpp" #include diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp index 82bcfa3b81..9681980374 100644 --- a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp @@ -1,5 +1,19 @@ -#ifndef _TKET_TESTS_TokenSwapping_Data_FixedCompleteSolutions_H_ -#define _TKET_TESTS_TokenSwapping_Data_FixedCompleteSolutions_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + #include #include #include @@ -64,4 +78,3 @@ struct FixedCompleteSolutions { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp index d0c5df6359..7a1a339b94 100644 --- a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "FixedSwapSequences.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp index 0a2c2aaed8..a4846faad8 100644 --- a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TESTS_TokenSwapping_Data_FixedSwapSequences_H_ -#define _TKET_TESTS_TokenSwapping_Data_FixedSwapSequences_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include #include @@ -75,4 +88,3 @@ struct FixedSwapSequences { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp index d1dfc8ce98..ca4eea1c4f 100644 --- a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp @@ -1,9 +1,21 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "NeighboursFromEdges.hpp" #include -; - namespace tket { namespace tsa_internal { namespace tests { diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp index dc9c17bd75..dff7cc4b15 100644 --- a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TESTS_TokenSwapping_TableLookup_NeighboursFromEdges_H_ -#define _TKET_TESTS_TokenSwapping_TableLookup_NeighboursFromEdges_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include @@ -48,4 +61,3 @@ NeighboursFromEdges::NeighboursFromEdges(const SwapContainer& edges) { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp index 9c84b236c3..5c139c697e 100644 --- a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "PermutationTestUtils.hpp" #include diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp index 73c028194b..3b452ae42a 100644 --- a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TESTS_TokenSwapping_TableLookup_PermutationTestUtils_H_ -#define _TKET_TESTS_TokenSwapping_TableLookup_PermutationTestUtils_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include @@ -24,4 +37,3 @@ struct PermutationTestUtils { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp index 21c8256a7c..ad488009d3 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "SwapSequenceReductionTester.hpp" #include @@ -8,7 +22,6 @@ #include "TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp" #include "TokenSwapping/TableLookup/VertexMapResizing.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp index 5ec90aaa46..c02f6872a8 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TESTS_TokenSwapping_TableLookup_SwapSequenceReductionTester_H_ -#define _TKET_TESTS_TokenSwapping_TableLookup_SwapSequenceReductionTester_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include @@ -54,4 +67,3 @@ struct SequenceReductionStats { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp index abfd296282..f992bdbddc 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include @@ -7,7 +21,6 @@ #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/TableLookup/CanonicalRelabelling.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp index 8d679d3a4c..82b91fdfda 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include @@ -6,7 +20,6 @@ #include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" #include "TokenSwapping/TableLookup/ExactMappingLookup.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp index 427c31ed7a..00e76fe5b4 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include #include #include @@ -5,7 +19,6 @@ #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/TableLookup/FilteredSwapSequences.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp index bd48493dc1..e4818c14d2 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include @@ -5,7 +19,6 @@ #include "../Data/FixedSwapSequences.hpp" #include "SwapSequenceReductionTester.hpp" -; using std::vector; // NOTE: running all tests in this file currently takes ~19 seconds diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp index 296a0b5ed0..eed4159c5e 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include @@ -7,7 +21,6 @@ #include "TokenSwapping/TableLookup/SwapConversion.hpp" #include "TokenSwapping/TableLookup/SwapSequenceTable.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp new file mode 100644 index 0000000000..87e33d2595 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp @@ -0,0 +1,62 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ArchitectureEdgesReimplementation.hpp" + +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +// This is just copied from Architecture.cpp, +// but we WANT it to remain fixed for testing purposes; +// do NOT keep in sync! +std::vector> get_square_grid_edges( + unsigned dim_r, const unsigned dim_c, const unsigned layers) { + // A trivial injective hash function on the cuboid. + const auto vertex = [dim_r, dim_c, layers]( + unsigned ver, unsigned hor, unsigned l) -> unsigned { + REQUIRE(ver < dim_r); + REQUIRE(hor < dim_c); + REQUIRE(l < layers); + return ver + dim_r * (hor + dim_c * l); + }; + + std::vector> edges; + for (unsigned l = 0; l < layers; l++) { + for (unsigned ver = 0; ver < dim_r; ver++) { + for (unsigned hor = 0; hor < dim_c; hor++) { + const auto n = vertex(ver, hor, l); + if (hor != dim_c - 1) { + const auto h_neighbour = vertex(ver, hor + 1, l); + edges.push_back({n, h_neighbour}); + } + if (ver != dim_r - 1) { + const auto v_neighbour = vertex(ver + 1, hor, l); + edges.push_back({n, v_neighbour}); + } + if (l != layers - 1) { + const auto l_neighbour = vertex(ver, hor, l + 1); + edges.push_back({n, l_neighbour}); + } + } + } + } + return edges; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp new file mode 100644 index 0000000000..b6ddcd8f39 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp @@ -0,0 +1,36 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +// We would like to use the SquareGrid Architecture class, +// but the order of edges is not guaranteed (an implementation detail). +// Therefore, we copy the code to have a single, fixed ordering +// for testing purposes with token swapping. +// NOTE: the only important thing is the order of edges, +// NOT the specific vertex labels. The vertices will be relabelled +// in order of appearance by ArchitectureMapping. +std::vector> get_square_grid_edges( + unsigned dim_r, const unsigned dim_c, const unsigned layers); + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp index e4a0837823..a10bd9def7 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "BestTsaTester.hpp" #include @@ -6,7 +20,6 @@ #include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" #include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" -; using std::vector; namespace tket { @@ -114,7 +127,7 @@ size_t BestTsaTester::get_checked_solution_size( problem_data.vertex_mapping.size() == number_of_vertices; const Architecture arch(m_edges_vect); - const ArchitectureMapping arch_mapping(arch); + const ArchitectureMapping arch_mapping(arch, m_edges_vect); const VertexRelabellingManager relabelling_manager(m_edges_vect); m_raw_swap_list.clear(); m_vertex_mapping_copy = diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp index 1dad2b38aa..65c142b932 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TESTS_TokenSwapping_TestUtils_BestTsaTester_H_ -#define _TKET_TESTS_TokenSwapping_TestUtils_BestTsaTester_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "DecodedProblemData.hpp" #include "TokenSwapping/BestFullTsa.hpp" @@ -51,4 +64,3 @@ class BestTsaTester { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp index 51fc774618..8f7fb94d6c 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "DecodedProblemData.hpp" #include @@ -5,7 +19,6 @@ #include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" #include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp index dcb97ab62b..abc5578ff8 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TESTS_TokenSwapping_TestUtils_DecodedProblemData_H_ -#define _TKET_TESTS_TokenSwapping_TestUtils_DecodedProblemData_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include #include @@ -61,4 +74,3 @@ struct DecodedArchitectureData { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp index e0b8a60c17..16e7a57425 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "FullTsaTesting.hpp" #include @@ -10,7 +24,6 @@ #include "TokenSwapping/TSAUtils/DistanceFunctions.hpp" #include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" -; using std::vector; namespace tket { @@ -95,8 +108,9 @@ FullTsaTesting::FullTsaTesting() { } void FullTsaTesting::add_problems( - const Architecture& arch, const vector& problems, - const std::string& new_name, RNG& rng, PartialTsaInterface& full_tsa) { + const ArchitectureMapping& arch_mapping, + const vector& problems, const std::string& new_name, + RNG& rng, PartialTsaInterface& full_tsa) { m_number_of_problems += problems.size(); const std::string name_for_this = new_name + ":" + full_tsa.name(); if (m_name.empty()) { @@ -106,7 +120,6 @@ void FullTsaTesting::add_problems( m_name = m_name + ":" + name_for_this; } } - const ArchitectureMapping arch_mapping(arch); DistancesFromArchitecture distances(arch_mapping); NeighboursFromArchitecture neighbours(arch_mapping); RiverFlowPathFinder path_finder(distances, neighbours, rng); diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index 90a27adc8b..730ba812b8 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -1,7 +1,20 @@ -#ifndef _TKET_TESTS_TokenSwapping_TestUtils_FullTsaTesting_H_ -#define _TKET_TESTS_TokenSwapping_TestUtils_FullTsaTesting_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -#include "Architecture/Architecture.hpp" +#pragma once + +#include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" @@ -18,8 +31,9 @@ class FullTsaTesting { /// Will use the RiverFlowPathFinder /// (which needs an RNG). void add_problems( - const Architecture& arch, const std::vector& problems, - const std::string& name, RNG& rng, PartialTsaInterface& full_tsa); + const ArchitectureMapping& arch_mapping, + const std::vector& problems, const std::string& name, + RNG& rng, PartialTsaInterface& full_tsa); /// A summary of the statistics. std::string str() const; @@ -71,4 +85,3 @@ class FullTsaTesting { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp index d826768fb2..a0edae3ddb 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -1,16 +1,28 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "PartialTsaTesting.hpp" #include #include "TestStatsStructs.hpp" -#include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/TSAUtils/DistanceFunctions.hpp" #include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" -; using std::vector; namespace tket { @@ -108,10 +120,10 @@ static std::string run_tests( } std::string run_tests( - const Architecture& arch, const std::vector& problems, + const ArchitectureMapping& arch_mapping, + const std::vector& problems, PathFinderInterface& path_finder, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, TokenOption token_option) { - const ArchitectureMapping arch_mapping(arch); DistancesFromArchitecture distances(arch_mapping); NeighboursFromArchitecture neighbours(arch_mapping); return run_tests( @@ -120,10 +132,10 @@ std::string run_tests( } std::string run_tests( - const Architecture& arch, const std::vector& problems, - RNG& rng, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + const ArchitectureMapping& arch_mapping, + const std::vector& problems, RNG& rng, + PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, TokenOption token_option) { - const ArchitectureMapping arch_mapping(arch); DistancesFromArchitecture distances(arch_mapping); NeighboursFromArchitecture neighbours(arch_mapping); RiverFlowPathFinder path_finder(distances, neighbours, rng); diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index 39c467693a..fe4c3b9857 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -1,7 +1,20 @@ -#ifndef _TKET_TESTS_TokenSwapping_TestUtils_PartialTsaTesting_H_ -#define _TKET_TESTS_TokenSwapping_TestUtils_PartialTsaTesting_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -#include "Architecture/Architecture.hpp" +#pragma once + +#include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" #include "TokenSwapping/RNG.hpp" @@ -17,7 +30,8 @@ enum class TokenOption { /// Returns a summary string of the results, as well as doing the checks. 
std::string run_tests( - const Architecture& arch, const std::vector& problems, + const ArchitectureMapping& arch_mapping, + const std::vector& problems, PathFinderInterface& path_finder, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, TokenOption token_option = TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP); @@ -25,11 +39,11 @@ std::string run_tests( /// If no path finder is specified, will use the RiverFlowPathFinder /// (which needs an RNG). std::string run_tests( - const Architecture& arch, const std::vector& problems, - RNG& rng, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + const ArchitectureMapping& arch_mapping, + const std::vector& problems, RNG& rng, + PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, TokenOption token_option = TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP); } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp index c0dd2507df..7b7caaf558 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -1,10 +1,23 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "ProblemGeneration.hpp" #include #include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" -; using std::vector; namespace tket { @@ -43,14 +56,14 @@ ProblemGenerator00::ProblemGenerator00() : init_token_density_percentage(1), final_percentage(100), step(1) {} vector ProblemGenerator00::get_problems( - const std::string& arch_name, const Architecture& arch, RNG& rng, + const std::string& arch_name, unsigned number_of_vertices, RNG& rng, // It will calculate a short summary string of the problems // and check against this string; this helps to detect // accidentally changed parameters/generation algorithms // leading to different tests. 
const std::string& expected_summary) const { REQUIRE(step > 0); - const unsigned num_vertices = arch.n_nodes(); + TSProblemParameters00 params; vector vertex_mappings; @@ -60,12 +73,12 @@ vector ProblemGenerator00::get_problems( for (params.token_density_percentage = init_token_density_percentage; params.token_density_percentage <= final_percentage; params.token_density_percentage += step) { - vertex_mappings.push_back(params.get_problem(rng, num_vertices)); + vertex_mappings.push_back(params.get_problem(rng, number_of_vertices)); tokens_count += vertex_mappings.back().size(); } code = (code << 8) + rng.get_size_t(255); std::stringstream ss; - ss << "[" << arch_name << ": " << code << ": v" << num_vertices << " i" + ss << "[" << arch_name << ": " << code << ": v" << number_of_vertices << " i" << init_token_density_percentage << " f" << final_percentage << " s" << step << ": " << vertex_mappings.size() << " problems; " << tokens_count << " tokens]"; diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp index 8d1dbd7db8..168a149704 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_TESTS_TokenSwapping_TestUtils_ProblemGeneration_H_ -#define _TKET_TESTS_TokenSwapping_TestUtils_ProblemGeneration_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Architecture/Architecture.hpp" #include "TokenSwapping/RNG.hpp" @@ -34,7 +47,7 @@ struct ProblemGenerator00 { ProblemGenerator00(); std::vector get_problems( - const std::string& arch_name, const Architecture& arch, RNG& rng, + const std::string& arch_name, unsigned number_of_vertices, RNG& rng, // It will calculate a short summary string of the problems // and check against this string; this helps to detect // accidentally changed parameters/generation algorithms @@ -63,4 +76,3 @@ struct RandomTreeGenerator00 { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp index de6094d953..4cff62e020 100644 --- a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp @@ -1,11 +1,23 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + #include "TestStatsStructs.hpp" #include #include #include -; - namespace tket { namespace tsa_internal { namespace tests { diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp index 8cd1f3afe3..182790a92b 100644 --- a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp @@ -1,5 +1,16 @@ -#ifndef _TKET_TESTS_TokenSwapping_TestUtils_TestStatsStructs_H_ -#define _TKET_TESTS_TokenSwapping_TestUtils_TestStatsStructs_H_ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include #include @@ -41,4 +52,3 @@ struct PartialTsaStatistics { } // namespace tests } // namespace tsa_internal } // namespace tket -#endif diff --git a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp index 30d6cd6a69..1980c5b9f9 100644 --- a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp +++ b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include @@ -21,7 +35,7 @@ SCENARIO("Simple path") { ss << "(" << edge.first << "," << edge.second << ") "; } const Architecture arch(edges); - const ArchitectureMapping arch_mapping(arch); + const ArchitectureMapping arch_mapping(arch, edges); ss << "...\nEdges from arch.mapping:\n"; for (auto edge : arch_mapping.get_edges()) { diff --git a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp index 1ae0ce481d..21a074d24d 100644 --- a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + #include #include "Data/FixedCompleteSolutions.hpp" @@ -14,7 +28,6 @@ /// quality too much. // -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp index 061b1c625a..648d220594 100644 --- a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp +++ b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include @@ -5,7 +19,6 @@ #include "TokenSwapping/DistancesFromArchitecture.hpp" using Catch::Matchers::Contains; -; using std::vector; namespace tket { @@ -21,7 +34,7 @@ SCENARIO("Architecture with disconnected graph") { const Architecture arch(edges); // Note: it's a "coincidence" that the vertex numbers are unchanged, // because 0,1,2,3,4,5 are first seen in this order. - const ArchitectureMapping mapping(arch); + const ArchitectureMapping mapping(arch, edges); REQUIRE(mapping.number_of_vertices() == number_of_vertices); DistancesFromArchitecture dist_calculator(mapping); std::stringstream summary; diff --git a/tket/tests/TokenSwapping/test_FullTsa.cpp b/tket/tests/TokenSwapping/test_FullTsa.cpp index c3b7907265..b9aae17bcb 100644 --- a/tket/tests/TokenSwapping/test_FullTsa.cpp +++ b/tket/tests/TokenSwapping/test_FullTsa.cpp @@ -1,5 +1,20 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include +#include "TestUtils/ArchitectureEdgesReimplementation.hpp" #include "TestUtils/FullTsaTesting.hpp" #include "TestUtils/ProblemGeneration.hpp" #include "TokenSwapping/HybridTsa00.hpp" @@ -21,17 +36,30 @@ struct FullTester { std::string test_name; void add_problems( - const Architecture& arch, const std::string& arch_name, + const ArchitectureMapping& arch_mapping, const std::string& arch_name, const std::string& problem_message) { rng.set_seed(); - const auto problems = - generator.get_problems(arch_name, arch, rng, problem_message); + const auto problems = generator.get_problems( + arch_name, arch_mapping.number_of_vertices(), rng, problem_message); // OK to reuse RNG, as it's reset before each problem. 
- results.add_problems(arch, problems, test_name, rng, full_tsa); + results.add_problems(arch_mapping, problems, test_name, rng, full_tsa); trivial_tsa.set(TrivialTSA::Options::FULL_TSA); - trivial_results.add_problems(arch, problems, test_name, rng, trivial_tsa); + trivial_results.add_problems( + arch_mapping, problems, test_name, rng, trivial_tsa); + } + + void add_problems( + const vector>& edges, + const std::string& arch_name, const std::string& problem_message, + unsigned expected_number_of_vertices = 0) { + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + if (expected_number_of_vertices != 0) { + REQUIRE(arch_mapping.number_of_vertices() == expected_number_of_vertices); + } + add_problems(arch_mapping, arch_name, problem_message); } }; } // namespace @@ -54,8 +82,7 @@ SCENARIO("Full TSA: stars") { for (unsigned vv = 1; vv <= num_spokes[index]; ++vv) { edges.emplace_back(0, vv); } - const Architecture arch(edges); - tester.add_problems(arch, arch_name, problem_messages[index]); + tester.add_problems(edges, arch_name, problem_messages[index]); } CHECK( tester.results.str() == @@ -94,8 +121,7 @@ SCENARIO("Full TSA: wheels") { edges.emplace_back(vv, vv + 1); } } - const Architecture arch(edges); - tester.add_problems(arch, arch_name, problem_messages[index]); + tester.add_problems(edges, arch_name, problem_messages[index]); } CHECK( tester.results.str() == @@ -124,8 +150,14 @@ SCENARIO("Full TSA: Rings") { for (size_t index = 0; index < problem_messages.size(); ++index) { const RingArch arch(num_vertices[index]); arch_name = "Ring" + std::to_string(num_vertices[index]); - tester.add_problems(arch, arch_name, problem_messages[index]); + const ArchitectureMapping arch_mapping(arch); + tester.add_problems(arch_mapping, arch_name, problem_messages[index]); } + // NOTE: results could change, if RingArch changes vertex labelling + // (outside the control of token swapping). + // However this seems unlikely, since rings are so simple. + // See the comments for "Full TSA: Square Grids" (about + // get_square_grid_edges). CHECK( tester.results.str() == "[Rings:HybridTSA_00: 400 probs; 1802 toks; 3193 tot.lb]\n" @@ -139,7 +171,6 @@ SCENARIO("Full TSA: Rings") { "[Winners: joint: 231 252 394 397 400 394 undisputed: 0 0 0 0 3 0]"); } - SCENARIO("Full TSA: Square Grids") { const vector> grid_parameters = { {2, 2, 2}, {3, 4, 4}}; @@ -152,13 +183,29 @@ SCENARIO("Full TSA: Square Grids") { for (size_t index = 0; index < grid_parameters.size(); ++index) { const auto& parameters = grid_parameters[index]; - const SquareGrid arch(parameters[0], parameters[1], parameters[2]); + + // NOTE: if we used a SquareGrid architecture object, then results + // could change if SquareGrid and/or Architecture changed in future + // (giving different vertex labels, etc.), + // even if the underlying token swapping algorithm is unchanged. + // + // ArchitectureMapping can resolve these issues IF given the original + // vector of EDGES, in the same order as used to construct Architecture. + // The edge vector used to construct a SquareGrid architecture object + // is not available, so we just construct the edges directly, + // to give a fixed test independent of SquareGrid implementation details. 
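(Aside, not part of the patch: the "relabelled in order of first appearance" convention that these tests rely on, described in the comments above and in ArchitectureEdgesReimplementation.hpp, can be sketched in isolation. The helper below is purely illustrative and hypothetical, not a tket API; it only mimics the behaviour the comments attribute to ArchitectureMapping when it is handed the original edge vector.)

// Illustrative sketch only (hypothetical helper, not tket code): relabel
// vertices by first appearance in the edge list, which is the fixed
// labelling the token-swapping tests depend on.
#include <cstddef>
#include <map>
#include <utility>
#include <vector>

using Edge = std::pair<unsigned, unsigned>;

std::map<unsigned, std::size_t> relabel_by_first_appearance(
    const std::vector<Edge>& edges) {
  std::map<unsigned, std::size_t> new_label;
  for (const Edge& edge : edges) {
    for (unsigned v : {edge.first, edge.second}) {
      // emplace assigns the next unused index only the first time v is seen.
      new_label.emplace(v, new_label.size());
    }
  }
  return new_label;
}

// E.g. edges {5,2},{2,7} give 5 -> 0, 2 -> 1, 7 -> 2, independently of how an
// Architecture object might order its nodes internally.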
+ const auto edges = + get_square_grid_edges(parameters[0], parameters[1], parameters[2]); + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + std::stringstream ss; ss << "Grid(" << parameters[0] << "," << parameters[1] << "," << parameters[2] << ")"; - tester.add_problems(arch, ss.str(), problem_messages[index]); + tester.add_problems(arch_mapping, ss.str(), problem_messages[index]); } + CHECK( tester.results.str() == "[Square grids:HybridTSA_00: 200 probs; 2746 toks; 4323 tot.lb]\n" @@ -190,10 +237,9 @@ SCENARIO("Full TSA: Random trees") { 4 * tree_generator.max_number_of_children; const auto edges = tree_generator.get_tree_edges(tester.rng); - const Architecture arch(edges); - REQUIRE(arch.n_nodes() == edges.size() + 1); arch_name = "Tree" + std::to_string(index); - tester.add_problems(arch, arch_name, problem_messages[index]); + tester.add_problems( + edges, arch_name, problem_messages[index], edges.size() + 1); } CHECK( tester.results.str() == diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 794ebfee7c..239e14d9be 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -1,13 +1,27 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include +#include "TestUtils/ArchitectureEdgesReimplementation.hpp" #include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" -; using std::vector; namespace tket { @@ -207,8 +221,8 @@ SCENARIO("Test path generation for cycles") { // Deliberately use the same RNG, so it's all mixed up; // but we still expect not so many different paths. 
-static void test(TestResult& result, const Architecture& arch, RNG& rng) { - const ArchitectureMapping arch_mapping(arch); +static void test( + TestResult& result, const ArchitectureMapping& arch_mapping, RNG& rng) { DistancesFromArchitecture distances(arch_mapping); NeighboursFromArchitecture neighbours(arch_mapping); RiverFlowPathFinder path_finder(distances, neighbours, rng); @@ -222,7 +236,8 @@ SCENARIO("Path generation for ring graph") { RNG rng; TestResult result; const RingArch arch(7); - test(result, arch, rng); + const ArchitectureMapping arch_mapping(arch); + test(result, arch_mapping, rng); REQUIRE(result.str() == "[ Number of path calls: 490 Extra paths: 0 ]"); } @@ -232,9 +247,10 @@ SCENARIO("Path generation for square grids") { for (size_t ver = 2; ver <= 4; ver += 2) { for (size_t hor = 1; hor <= 5; hor += 2) { for (size_t layer = 1; layer <= 3; layer += 2) { - const SquareGrid arch(ver, hor, layer); - INFO("Square grid: " << ver << ", " << hor << ", " << layer); - test(result, arch, rng); + const auto edges = get_square_grid_edges(ver, hor, layer); + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + test(result, arch_mapping, rng); } } } diff --git a/tket/tests/TokenSwapping/test_SwapList.cpp b/tket/tests/TokenSwapping/test_SwapList.cpp index ba660b724c..bd685d2c49 100644 --- a/tket/tests/TokenSwapping/test_SwapList.cpp +++ b/tket/tests/TokenSwapping/test_SwapList.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp index 12cb570cb2..7c76a4687f 100644 --- a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include #include #include @@ -7,7 +21,6 @@ #include "TokenSwapping/SwapListOptimiser.hpp" #include "TokenSwapping/TSAUtils/DebugFunctions.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp index 9b88c48519..503338a742 100644 --- a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -1,5 +1,20 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include +#include "TestUtils/ArchitectureEdgesReimplementation.hpp" #include "TestUtils/PartialTsaTesting.hpp" #include "TestUtils/ProblemGeneration.hpp" #include "TokenSwapping/CyclesPartialTsa.hpp" @@ -8,7 +23,6 @@ #include "TokenSwapping/TSAUtils/DebugFunctions.hpp" #include "TokenSwapping/TrivialTSA.hpp" -; using std::vector; namespace tket { @@ -25,23 +39,25 @@ struct Tester { mutable CyclesPartialTsa cycles_tsa; void run_test( - const Architecture& arch, const vector& problems, - size_t index) const { + const ArchitectureMapping& arch_mapping, + const vector& problems, size_t index) const { trivial_tsa.set(TrivialTSA::Options::FULL_TSA); CHECK( run_tests( - arch, problems, rng, trivial_tsa, RequiredTsaProgress::FULL) == - messages_full_trivial_tsa[index]); + arch_mapping, problems, rng, trivial_tsa, + RequiredTsaProgress::FULL) == messages_full_trivial_tsa[index]); trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); CHECK( run_tests( - arch, problems, rng, trivial_tsa, RequiredTsaProgress::NONZERO) == + arch_mapping, problems, rng, trivial_tsa, + RequiredTsaProgress::NONZERO) == messages_partial_trivial_tsa[index]); CHECK( - run_tests(arch, problems, rng, cycles_tsa, RequiredTsaProgress::NONE) == - messages_cycles_tsa_0[index]); + run_tests( + arch_mapping, problems, rng, cycles_tsa, + RequiredTsaProgress::NONE) == messages_cycles_tsa_0[index]); } }; @@ -163,9 +179,10 @@ SCENARIO("Partial TSA: Rings") { // OK to reuse RNG, as it's reset before each problem. 
tester.rng.set_seed(); const auto problems = generator.get_problems( - arch_name, arch, tester.rng, problem_messages[index]); + arch_name, num_vertices, tester.rng, problem_messages[index]); - tester.run_test(arch, problems, index); + const ArchitectureMapping arch_mapping(arch); + tester.run_test(arch_mapping, problems, index); } } @@ -214,16 +231,21 @@ SCENARIO("Partial TSA: Square grid") { for (size_t index = 0; index < grid_parameters.size(); ++index) { const auto& parameters = grid_parameters[index]; - const SquareGrid arch(parameters[0], parameters[1], parameters[2]); + + const auto edges = + get_square_grid_edges(parameters[0], parameters[1], parameters[2]); + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + std::stringstream ss; ss << "Grid(" << parameters[0] << "," << parameters[1] << "," << parameters[2] << ")"; tester.rng.set_seed(); const auto problems = generator.get_problems( - ss.str(), arch, tester.rng, problem_messages[index]); + ss.str(), arch.n_nodes(), tester.rng, problem_messages[index]); - tester.run_test(arch, problems, index); + tester.run_test(arch_mapping, problems, index); } } diff --git a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp index 6a8d6472aa..238b5e38a6 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include @@ -5,7 +19,6 @@ #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/VectorListHybrid.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp index e905b6a024..d959935366 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include #include #include @@ -9,7 +23,6 @@ #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/VectorListHybridSkeleton.hpp" -; using std::vector; namespace tket { diff --git a/tket/tests/TokenSwapping/test_main_entry_functions.cpp b/tket/tests/TokenSwapping/test_main_entry_functions.cpp index 30d381bae6..a0a110a0f6 100644 --- a/tket/tests/TokenSwapping/test_main_entry_functions.cpp +++ b/tket/tests/TokenSwapping/test_main_entry_functions.cpp @@ -1,10 +1,23 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/main_entry_functions.hpp" -; using std::vector; // Detailed algorithmic checks with quantitative benchmarks @@ -78,9 +91,20 @@ SCENARIO("main entry function for TSA") { // Calculate swaps to enact the permutation. const auto node_swaps = get_swaps(arch, node_mapping); + // This will hopefully decrease over time // as we improve the algorithm. - CHECK(node_swaps.size() == 29); + // HOWEVER, apart from the underlying token swapping algorithm, + // there is ANOTHER possible way for this to change: + // Architecture could change the order of nodes returned + // in nodes(), which would cause vertex relabelling and hence + // an isomorphic but different token swapping problem. + // This is UNAVOIDABLE, since get_swaps takes an Architecture + // object, NOT an ArchitectureMapping object. + // This is not really a problem (unless the number of swaps + // changes massively), since the solution is checked + // for correctness. + CHECK(node_swaps.size() == 27); // Go back to the original configuration, and perform the swaps. 
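As a rough illustration of the kind of correctness check referred to in the comment above, here is a hedged sketch (not the test's actual code) that applies a returned swap sequence to an identity token configuration and verifies the requested permutation. It assumes node_mapping is a source-to-target std::map<Node, Node> and that the swaps are std::pair<Node, Node> values applied in order.

#include <map>
#include <utility>
#include <vector>

// Hypothetical verification helper: every node initially holds its own
// token; apply the swaps in order; then the token that started on each
// source node must have arrived on the mapped target node.
static bool swaps_enact_mapping(
    const std::vector<std::pair<Node, Node>>& swaps,
    const std::map<Node, Node>& node_mapping) {
  // node -> token currently on it (identity to start with).
  std::map<Node, Node> token_at_node;
  for (const auto& entry : node_mapping) {
    token_at_node.emplace(entry.first, entry.first);
    token_at_node.emplace(entry.second, entry.second);
  }
  for (const auto& swap : swaps) {
    // Swaps may also pass through nodes not mentioned in the mapping.
    token_at_node.emplace(swap.first, swap.first);
    token_at_node.emplace(swap.second, swap.second);
    std::swap(token_at_node.at(swap.first), token_at_node.at(swap.second));
  }
  for (const auto& entry : node_mapping) {
    if (!(token_at_node.at(entry.second) == entry.first)) return false;
  }
  return true;
}

Under these assumptions, the CHECK on node_swaps.size() only pins down the swap count, while a check along the lines of swaps_enact_mapping(node_swaps, node_mapping) would establish correctness independently of any vertex relabelling.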
nodes_copy = nodes; diff --git a/tket/tests/test_CompilerPass.cpp b/tket/tests/test_CompilerPass.cpp index c525595e55..812aa3adc3 100644 --- a/tket/tests/test_CompilerPass.cpp +++ b/tket/tests/test_CompilerPass.cpp @@ -251,7 +251,9 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { CompilationUnit cu(circ, preds); PlacementPtr pp = std::make_shared(grid); - PassPtr cp_route = gen_full_mapping_pass(grid, pp, {50, 0, 0, 0}); + LexiRouteRoutingMethod lrrm(50); + RoutingMethodPtr rmw = std::make_shared(lrrm); + PassPtr cp_route = gen_full_mapping_pass(grid, pp, {rmw}); PassPtr all_passes = SynthesiseHQS() >> SynthesiseOQC() >> SynthesiseUMD() >> SynthesiseTket() >> cp_route; @@ -800,7 +802,9 @@ SCENARIO("DecomposeArbitrarilyControlledGates test") { SCENARIO("Precomposed passes successfully compose") { GIVEN("gen_directed_cx_routing_pass") { RingArch arc(6); - REQUIRE_NOTHROW(gen_directed_cx_routing_pass(arc)); + LexiRouteRoutingMethod lrrm(50); + RoutingMethodPtr rmw = std::make_shared(lrrm); + REQUIRE_NOTHROW(gen_directed_cx_routing_pass(arc, {rmw})); } } @@ -821,7 +825,9 @@ SCENARIO("Test Pauli Graph Synthesis Pass") { SCENARIO("Compose Pauli Graph synthesis Passes") { RingArch arc(10); - PassPtr dir_pass = gen_directed_cx_routing_pass(arc); + LexiRouteRoutingMethod lrrm(50); + RoutingMethodPtr rmw = std::make_shared(lrrm); + PassPtr dir_pass = gen_directed_cx_routing_pass(arc, {rmw}); GIVEN("Special UCC Synthesis") { PassPtr spec_ucc = gen_special_UCC_synthesis(); REQUIRE_NOTHROW(spec_ucc >> dir_pass); @@ -904,14 +910,16 @@ SCENARIO("Commute measurements to the end of a circuit") { Architecture line({{0, 1}, {1, 2}, {2, 3}}); PlacementPtr pp = std::make_shared(line); - PassPtr route_pass = gen_full_mapping_pass(line, pp); + LexiRouteRoutingMethod lrrm(50); + RoutingMethodPtr rmw = std::make_shared(lrrm); + PassPtr route_pass = gen_full_mapping_pass(line, pp, {rmw}); CompilationUnit cu(test); route_pass->apply(cu); REQUIRE(delay_pass->apply(cu)); Command final_command = cu.get_circ_ref().get_commands()[7]; OpType type = final_command.get_op_ptr()->get_type(); REQUIRE(type == OpType::Measure); - REQUIRE(final_command.get_args().front() == Node(1)); + REQUIRE(final_command.get_args().front() == Node(3)); } } @@ -973,8 +981,10 @@ SCENARIO("CX mapping pass") { REQUIRE(is_classical_map(c_placed)); // Route + LexiRouteRoutingMethod lrrm(50); + RoutingMethodPtr rmw = std::make_shared(lrrm); CompilationUnit cu_route(c_placed); - gen_routing_pass(line)->apply(cu_route); + gen_routing_pass(line, {rmw})->apply(cu_route); const Circuit& c_routed = cu_route.get_circ_ref(); // Rebase again diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index a18a2d127d..15288938b6 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -334,7 +334,7 @@ SCENARIO("Test LexiRouteRoutingMethod") { std::vector commands = mf->circuit_.get_commands(); REQUIRE(commands.size() == 9); Command bridge_c = commands[2]; - unit_vector_t uids = {nodes[5], nodes[2], nodes[8]}; + unit_vector_t uids = {nodes[8], nodes[2], nodes[5]}; REQUIRE(bridge_c.get_args() == uids); REQUIRE(*bridge_c.get_op_ptr() == *get_op_ptr(OpType::BRIDGE)); } @@ -426,12 +426,12 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(architecture, false); MappingManager mm(shared_arc); - LexiRouteRoutingMethod lrrm(100); std::shared_ptr mf = std::make_shared(copy_circ); - std::vector> vrm = {lrrm}; - 
REQUIRE(vrm[0].get().check_method(mf, shared_arc)); + std::vector vrm = { + std::make_shared(100)}; + REQUIRE(vrm[0]->check_method(mf, shared_arc)); bool res = mm.route_circuit(circ, vrm); @@ -458,8 +458,8 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(sg, false); MappingManager mm(shared_arc); - LexiRouteRoutingMethod lrrm(100); - std::vector> vrm = {lrrm}; + std::vector vrm = { + std::make_shared(100)}; bool res = mm.route_circuit(circ, vrm); PredicatePtr routed_correctly = std::make_shared(sg); diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp index 0cc0e8caed..6676a8b104 100644 --- a/tket/tests/test_MappingFrontier.cpp +++ b/tket/tests/test_MappingFrontier.cpp @@ -522,7 +522,7 @@ SCENARIO("Test MappingFrontier::add_qubit") { circ.rename_units(rename_map); MappingFrontier mf(circ); - mf.add_qubit(nodes[3]); + mf.add_ancilla(nodes[3]); REQUIRE(circ.all_qubits().size() == 4); REQUIRE(mf.circuit_.all_qubits().size() == 4); diff --git a/tket/tests/test_MappingManager.cpp b/tket/tests/test_MappingManager.cpp index fac280924d..c28bf04278 100644 --- a/tket/tests/test_MappingManager.cpp +++ b/tket/tests/test_MappingManager.cpp @@ -37,8 +37,7 @@ SCENARIO("Test MappingManager::route_circuit") { Architecture arc({{node0, node1}, {node1, node2}}); ArchitecturePtr shared_arc = std::make_shared(arc); MappingManager test_mm(shared_arc); - RoutingMethod test_rm; - std::vector> test_vrm = {test_rm}; + std::vector test_vrm = {std::make_shared()}; GIVEN("More qubits than architecture has qubits.") { Circuit circ(5); REQUIRE_THROWS_AS( @@ -65,8 +64,8 @@ SCENARIO("Test MappingManager::route_circuit") { std::map rename_map = { {qubits[0], node0}, {qubits[1], node1}, {qubits[2], node2}}; circ.rename_units(rename_map); - TokenSwappingTester tst; - std::vector> test_ts_rm = {tst}; + std::vector test_ts_rm = { + std::make_shared()}; test_mm.route_circuit(circ, test_ts_rm); std::vector commands = circ.get_commands(); diff --git a/tket/tests/test_Routing.cpp b/tket/tests/test_Routing.cpp index e18f55aad4..e8f57c4a07 100644 --- a/tket/tests/test_Routing.cpp +++ b/tket/tests/test_Routing.cpp @@ -1729,10 +1729,9 @@ SCENARIO("Test barrier is ignored by routing") { add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); add_2qb_gates(circ, OpType::CZ, {{1, 2}, {3, 2}, {3, 1}}); circ.add_barrier({0, 1, 2, 3}); - - RoutingConfig config = {}; PlacementPtr pp = std::make_shared(arc); - PassPtr p = gen_full_mapping_pass(arc, pp, config); + RoutingMethodPtr rmp = std::make_shared(100); + PassPtr p = gen_full_mapping_pass(arc, pp, {rmp}); CompilationUnit cu(circ); p->apply(cu); REQUIRE( diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index c448392b95..f76fbf61b3 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -516,7 +516,8 @@ SCENARIO("Test predicate serializations") { SCENARIO("Test compiler pass serializations") { Architecture arc = SquareGrid(2, 4, 2); - RoutingConfig rcon(20, 6, 3, 2.5); + RoutingMethodPtr rmp = std::make_shared(80); + std::vector rcon = {rmp}; PlacementConfig plcon(5, 20, 100000, 10, 1000); PlacementPtr place = std::make_shared(arc, plcon); std::map qmap = {{Qubit(0), Node(1)}, {Qubit(3), Node(2)}}; @@ -639,7 +640,13 @@ SCENARIO("Test compiler pass serializations") { j_pp["StandardPass"]["name"] = "FullMappingPass"; j_pp["StandardPass"]["architecture"] = arc; j_pp["StandardPass"]["placement"] = place; - j_pp["StandardPass"]["routing_config"] = rcon; 
+
+ nlohmann::json config_array;
+ for (const auto& con : rcon) {
+ config_array.push_back(*con);
+ }
+
+ j_pp["StandardPass"]["routing_config"] = config_array;
 PassPtr loaded = j_pp.get();
 pp->apply(cu);
 loaded->apply(copy);
@@ -671,7 +678,11 @@ SCENARIO("Test compiler pass serializations") {
 j_pp["StandardPass"]["name"] = "CXMappingPass";
 j_pp["StandardPass"]["architecture"] = arc;
 j_pp["StandardPass"]["placement"] = place;
- j_pp["StandardPass"]["routing_config"] = rcon;
+ nlohmann::json config_array;
+ for (const auto& con : rcon) {
+ config_array.push_back(*con);
+ }
+ j_pp["StandardPass"]["routing_config"] = config_array;
 j_pp["StandardPass"]["directed"] = true;
 j_pp["StandardPass"]["delay_measures"] = false;
 PassPtr loaded = j_pp.get();
diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake
index 07560f1e18..dbf6ea2474 100644
--- a/tket/tests/tkettestsfiles.cmake
+++ b/tket/tests/tkettestsfiles.cmake
@@ -49,6 +49,7 @@ set(TEST_SOURCES
 ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp
 ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp
 ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp
+ ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp
 ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/BestTsaTester.cpp
 ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DecodedProblemData.cpp
 ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/FullTsaTesting.cpp
From ad98c38f6e17edf0e124c38b911728734070693c Mon Sep 17 00:00:00 2001
From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com>
Date: Tue, 11 Jan 2022 14:23:40 +0000
Subject: [PATCH 009/146] Remove outdated Routing code from repository (#165)

* Add token swapping stage, add test
* Update compilation passes to use new routing
* Add json serialization
* Continue adding JSON serialisation for routing_config
* Improve Json definitions
* Update JSON Serialization and use of Barrier
* Change from reference_wrapper to shared_ptr
* Add JSON_DECL for std::vector
* format routing_test
* Fix up tests and binders for python
* Uncomment measurement tests
* rename method to merge_ancilla
* debug proptest
* Make add_qubit add qubit to unit_bimaps_ if not nullptr
* Architectures -> Architecture
* Install boost on MacOS.
* comments to debug
* update proptest to support ancillas properly
* remove couts
* format
* Make Unitary dimensions match
* add tket assert for comparison
* Update test to check value
* add_qubit -> add_ancilla
* Remove kwargs formatting from argument
* Rename Architecture Methods
* rename architecture methods
* Allow architecture mapping to take original edges, to calculate Node to size_t mapping
* add get_square_grid_edges, to allow fixed tests independent of SquareGrid
* use ArchitectureMapping and edges in most tests, instead of Architecture
* trivial typos, comments, cmake update
* add copyright notices, pragma once, remove semicolon typos
* update binders for inheritance and docs
* format
* Remove NodeGraph
* update formatting
* Update CMakeLists and Setup.py
* Use explicit shared_ptr
* Refactor Routing module binder
Make "FullMappingPass" use a kwargs based argument to get round faulty docs type definitions.
* remove trailing whitespace
* update clang formatting
* reformat file
* update orientation of BRIDGE gates
* Remove src/Routing Move Placement files into new src/Placement subdirectory, update tests as necessary, move connectivity constraint verification to utils
* update tket/pytket to not install or use old pytket.routing
* clang formatting
* Update mitigation test to use explicit placement
* remove binder file
* Update conf docs mapping, remove kwargs full mapping pass
* update docs for new python modules
* Move Verification files to src/Mapping subdirectory
* Delete test_Routing.cpp
* Update imports for Verification.hpp
* Add default argument to LexiRouteRoutingMethod binder Update python tests to use default
* update Verification.cpp compilation pass
* Formatting

Co-authored-by: Alec Edgington
Co-authored-by: Zen Harper
---
 pytket/CMakeLists.txt | 1 -
 pytket/binders/circuit/Circuit/main.cpp | 2 +-
 pytket/binders/mapping.cpp | 3 +-
 pytket/binders/placement.cpp | 3 +-
 pytket/binders/routing.cpp | 75 -
 pytket/binders/transform.cpp | 1 -
 pytket/docs/architecture.rst | 5 +
 pytket/docs/index.rst | 4 +-
 pytket/docs/{routing.rst => mapping.rst} | 4 +-
 pytket/docs/placement.rst | 5 +
 pytket/pytket/__init__.py | 2 +-
 pytket/pytket/routing/__init__.py | 23 -
 pytket/setup.py | 1 -
 pytket/tests/backend_test.py | 7 +-
 pytket/tests/mapping_test.py | 6 +-
 pytket/tests/mitigation_test.py | 15 +-
 pytket/tests/predicates_test.py | 4 +-
 pytket/tests/routing_test.py | 865 ------
 schemas/compiler_pass_v1.json | 3 -
 tket/src/ArchAwareSynth/Path.hpp | 2 +-
 tket/src/CMakeLists.txt | 20 +-
 .../src/{Routing => Mapping}/Verification.cpp | 0
 .../src/{Routing => Mapping}/Verification.hpp | 0
 tket/src/{Routing => Placement}/Placement.cpp | 0
 tket/src/{Routing => Placement}/Placement.hpp | 22 +
 .../PlacementGraphClasses.cpp | 0
 .../Qubit_Placement.cpp | 48 +-
 .../subgraph_mapping.cpp | 2 +-
 tket/src/Predicates/PassGenerators.cpp | 2 +-
 tket/src/Predicates/Predicates.cpp | 3 +-
 tket/src/Predicates/Predicates.hpp | 1 -
 tket/src/Routing/Board_Analysis.cpp | 63 -
 tket/src/Routing/Routing.cpp | 309 --
 tket/src/Routing/Routing.hpp | 366 ---
 tket/src/Routing/Slice_Manipulation.cpp | 236 --
 tket/src/Routing/Swap_Analysis.cpp | 606 ----
 tket/tests/test_CompilerPass.cpp | 2 +-
 tket/tests/test_LexiRoute.cpp | 1 -
 tket/tests/test_Placement.cpp | 2 +-
 tket/tests/test_Predicates.cpp | 1 +
 tket/tests/test_Routing.cpp | 2707 -----------------
 tket/tests/test_json.cpp | 8 -
 tket/tests/tkettestsfiles.cmake | 1 -
 43 files changed, 124 insertions(+), 5307 deletions(-)
 delete mode 100644 pytket/binders/routing.cpp
 create mode 100644 pytket/docs/architecture.rst
 rename pytket/docs/{routing.rst => mapping.rst} (60%)
 create mode 100644 pytket/docs/placement.rst
 delete mode 100644 pytket/pytket/routing/__init__.py
 delete mode 100644 pytket/tests/routing_test.py
 rename tket/src/{Routing => Mapping}/Verification.cpp (100%)
 rename tket/src/{Routing => Mapping}/Verification.hpp (100%)
 rename tket/src/{Routing => Placement}/Placement.cpp (100%)
 rename tket/src/{Routing => Placement}/Placement.hpp (93%)
 rename tket/src/{Routing => Placement}/PlacementGraphClasses.cpp (100%)
 rename tket/src/{Routing => Placement}/Qubit_Placement.cpp (88%)
 rename tket/src/{Routing => Placement}/subgraph_mapping.cpp (99%)
 delete mode 100644 tket/src/Routing/Board_Analysis.cpp
 delete mode 100644 tket/src/Routing/Routing.cpp
 delete mode 100644 tket/src/Routing/Routing.hpp
 delete mode 100644 tket/src/Routing/Slice_Manipulation.cpp
 delete mode 100644
tket/src/Routing/Swap_Analysis.cpp delete mode 100644 tket/tests/test_Routing.cpp diff --git a/pytket/CMakeLists.txt b/pytket/CMakeLists.txt index 7b9d0ae160..336e1a762b 100644 --- a/pytket/CMakeLists.txt +++ b/pytket/CMakeLists.txt @@ -53,7 +53,6 @@ build_module(circuit binders/circuit/Circuit/main.cpp binders/circuit/Circuit/add_op.cpp binders/circuit/Circuit/add_classical_op.cpp) -build_module(routing binders/routing.cpp) build_module(mapping binders/mapping.cpp) build_module(transform binders/transform.cpp) build_module(predicates binders/predicates.cpp) diff --git a/pytket/binders/circuit/Circuit/main.cpp b/pytket/binders/circuit/Circuit/main.cpp index 9dd21cd146..f3eadea6c2 100644 --- a/pytket/binders/circuit/Circuit/main.cpp +++ b/pytket/binders/circuit/Circuit/main.cpp @@ -26,8 +26,8 @@ #include "Circuit/Command.hpp" #include "Gate/OpPtrFunctions.hpp" #include "Gate/SymTable.hpp" +#include "Mapping/Verification.hpp" #include "Ops/Op.hpp" -#include "Routing/Verification.hpp" #include "Simulation/CircuitSimulator.hpp" #include "UnitRegister.hpp" #include "Utils/Json.hpp" diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index f8d07e75b0..f50eb622e4 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -56,7 +56,8 @@ PYBIND11_MODULE(mapping, m) { "LexiRoute constructor.\n\n:param lookahead: Maximum depth of " "lookahead " "employed when picking SWAP for purpose of logical to physical " - "mapping."); + "mapping.", + py::arg("lookahead") = 10); py::class_( m, "MappingManager", diff --git a/pytket/binders/placement.cpp b/pytket/binders/placement.cpp index 582c098264..63995d702e 100644 --- a/pytket/binders/placement.cpp +++ b/pytket/binders/placement.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "Routing/Placement.hpp" +#include "Placement/Placement.hpp" #include #include @@ -29,7 +29,6 @@ using json = nlohmann::json; namespace tket { -// definitely a better way of doing this ... void amend_config_from_kwargs(NoiseAwarePlacement &pobj, py::kwargs kwargs) { PlacementConfig config_ = pobj.get_config(); diff --git a/pytket/binders/routing.cpp b/pytket/binders/routing.cpp deleted file mode 100644 index 66707b342a..0000000000 --- a/pytket/binders/routing.cpp +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "Routing/Routing.hpp" - -#include -#include -#include -#include - -#include "Architecture/Architecture.hpp" -#include "Circuit/Circuit.hpp" -#include "Transformations/Transform.hpp" -#include "Utils/Json.hpp" -#include "binder_json.hpp" -#include "binder_utils.hpp" -#include "typecast.hpp" - -namespace py = pybind11; -using json = nlohmann::json; - -namespace tket { - -std::pair route( - const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { - RoutingConfig config = {}; - if (kwargs.contains("swap_lookahead")) - config.depth_limit = py::cast(kwargs["swap_lookahead"]); - if (kwargs.contains("bridge_lookahead")) - config.distrib_limit = py::cast(kwargs["bridge_lookahead"]); - if (kwargs.contains("bridge_interactions")) - config.interactions_limit = - py::cast(kwargs["bridge_interactions"]); - if (kwargs.contains("bridge_exponent")) - config.distrib_exponent = py::cast(kwargs["bridge_exponent"]); - - Routing router(circuit, arc); - Circuit out = router.solve(config).first; - return {out, router.return_final_map()}; -} - -PYBIND11_MODULE(routing, m) { - m.def( - "route", - [](const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { - return route(circuit, arc, kwargs).first; - }, - "Routes the circuit subject to the connectivity of the input " - "architecture, given configuration settings." - "\n\n:param circuit: The circuit to be routed." - "\n:param architecture: A representation of the qubit connectivity " - "constraints of the device." - "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0, " - "\n:return: the routed :py:class:`Circuit`", - py::arg("circuit"), py::arg("architecture")); - m.def( - "_route_return_map", - [](const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { - return route(circuit, arc, kwargs); - }); -} -} // namespace tket diff --git a/pytket/binders/transform.cpp b/pytket/binders/transform.cpp index b6d0c961f8..dae0f69c59 100644 --- a/pytket/binders/transform.cpp +++ b/pytket/binders/transform.cpp @@ -21,7 +21,6 @@ #include #include "Circuit/Circuit.hpp" -#include "Routing/Routing.hpp" #include "Transformations/ContextualReduction.hpp" #include "typecast.hpp" diff --git a/pytket/docs/architecture.rst b/pytket/docs/architecture.rst new file mode 100644 index 0000000000..96b2a421d1 --- /dev/null +++ b/pytket/docs/architecture.rst @@ -0,0 +1,5 @@ +pytket.architecture +================================== +.. automodule:: pytket._tket.architecture + :members: + :special-members: __init__ diff --git a/pytket/docs/index.rst b/pytket/docs/index.rst index c5befeb2f3..58db783cf8 100644 --- a/pytket/docs/index.rst +++ b/pytket/docs/index.rst @@ -165,7 +165,9 @@ Our telemetry data policy can be viewed in the `Telemetry Data Policy`_ page. partition.rst qasm.rst quipper.rst - routing.rst + architecture.rst + placement.rst + mapping.rst transform.rst tailoring.rst zx.rst diff --git a/pytket/docs/routing.rst b/pytket/docs/mapping.rst similarity index 60% rename from pytket/docs/routing.rst rename to pytket/docs/mapping.rst index ef9cfa9905..22b4ed6dfb 100644 --- a/pytket/docs/routing.rst +++ b/pytket/docs/mapping.rst @@ -1,5 +1,5 @@ -pytket.routing +pytket.mapping ================================== -.. automodule:: pytket._tket.routing +.. 
automodule:: pytket._tket.mapping :members: :special-members: __init__ diff --git a/pytket/docs/placement.rst b/pytket/docs/placement.rst new file mode 100644 index 0000000000..7fd347894d --- /dev/null +++ b/pytket/docs/placement.rst @@ -0,0 +1,5 @@ +pytket.placement +================================== +.. automodule:: pytket._tket.placement + :members: + :special-members: __init__ diff --git a/pytket/pytket/__init__.py b/pytket/pytket/__init__.py index c414c351e1..2d5076deef 100755 --- a/pytket/pytket/__init__.py +++ b/pytket/pytket/__init__.py @@ -20,7 +20,7 @@ Qubit, Bit, ) -import pytket.routing +import pytket.mapping import pytket.architecture import pytket.placement import pytket.transform diff --git a/pytket/pytket/routing/__init__.py b/pytket/pytket/routing/__init__.py deleted file mode 100644 index 417a96f3cb..0000000000 --- a/pytket/pytket/routing/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2019-2021 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The routing module provides access to the tket :py:class:`Architecture` structure and -methods for modifying circuits to satisfy the architectural constraints. It also -provides acess to the :py:class:`Placement` constructors for relabelling Circuit qubits -and has some methods for routing circuits. This module is provided in binary form during -the PyPI installation. 
-""" - -from pytket._tket.routing import * # type: ignore diff --git a/pytket/setup.py b/pytket/setup.py index 1cf3a226ee..1feb554439 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -192,7 +192,6 @@ def build_extension(self, ext): "partition", "pauli", "program", - "routing", "mapping", "transform", "tailoring", diff --git a/pytket/tests/backend_test.py b/pytket/tests/backend_test.py index 85bf4ff0b9..fd8c806cee 100644 --- a/pytket/tests/backend_test.py +++ b/pytket/tests/backend_test.py @@ -24,7 +24,7 @@ from pytket.circuit import Circuit, OpType, BasisOrder, Qubit, Bit, Node # type: ignore from pytket.predicates import CompilationUnit # type: ignore from pytket.passes import PauliSimp, CliffordSimp, ContextSimp # type: ignore -from pytket.routing import route # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore from pytket.architecture import Architecture # type: ignore from pytket.utils.outcomearray import OutcomeArray, readout_counts from pytket.utils.prepare import prepare_circuit @@ -525,7 +525,10 @@ def test_postprocess_3() -> None: qbs = [Node("qn", i) for i in range(4)] arc = Architecture([[qbs[i], qbs[i + 1]] for i in range(3)]) c = Circuit(3, 3).H(0).CX(0, 2).measure_all() - rc = route(c, arc) + + mm = MappingManager(arc) + rc = c.copy() + mm.route_circuit(rc, [LexiRouteRoutingMethod()]) n_shots = 100 h = b.process_circuit(b.get_compiled_circuit(c), n_shots=n_shots, postprocess=True) r = b.get_result(h) diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index 6ecacd633b..9f989b4279 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -103,7 +103,7 @@ def test_LexiRouteRoutingMethod() -> None: nodes = [Node("test", 0), Node("test", 1), Node("test", 2)] test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]]) test_mm = MappingManager(test_a) - test_mm.route_circuit(test_c, [LexiRouteRoutingMethod(50)]) + test_mm.route_circuit(test_c, [LexiRouteRoutingMethod()]) routed_commands = test_c.get_commands() assert routed_commands[0].op.type == OpType.CX @@ -152,7 +152,7 @@ def test_RoutingMethodCircuit_custom_list() -> None: RoutingMethodCircuit( route_subcircuit_func, check_subcircuit_func_false, 5, 5 ), - LexiRouteRoutingMethod(50), + LexiRouteRoutingMethod(), ], ) routed_commands = test_c.get_commands() @@ -173,7 +173,7 @@ def test_RoutingMethodCircuit_custom_list() -> None: RoutingMethodCircuit( route_subcircuit_func, check_subcircuit_func_true, 5, 5 ), - LexiRouteRoutingMethod(50), + LexiRouteRoutingMethod(), ], ) routed_commands = test_c.get_commands() diff --git a/pytket/tests/mitigation_test.py b/pytket/tests/mitigation_test.py index 00220dff8f..30b78a9a02 100644 --- a/pytket/tests/mitigation_test.py +++ b/pytket/tests/mitigation_test.py @@ -16,9 +16,10 @@ import json from pytket.utils.spam import SpamCorrecter, compress_counts -from pytket.circuit import Node, Circuit # type: ignore -from pytket.routing import route # type: ignore +from pytket.circuit import Node, Circuit, Qubit # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore from pytket.architecture import Architecture # type: ignore +from pytket.placement import place_with_map # type: ignore from pytket.passes import DelayMeasures # type: ignore from typing import List, Dict, Counter, Tuple from pytket.utils.outcomearray import OutcomeArray @@ -107,7 +108,11 @@ def test_spam_integration() -> None: assert spam.characterisation_matrices[1].shape == (2, 2) bellcc = 
Circuit(3, 3).H(0).CX(0, 2).measure_all() - rbell = route(bellcc, arc) + qmap = {Qubit(0): qbs[1], Qubit(1): qbs[2], Qubit(2): qbs[0]} + place_with_map(bellcc, qmap) + mm = MappingManager(arc) + rbell = bellcc.copy() + mm.route_circuit(rbell, [LexiRouteRoutingMethod()]) def check_correction( counts0: Dict[Tuple[int, ...], int], counts1: Dict[Tuple[int, ...], int] @@ -502,7 +507,9 @@ def test_spam_routing() -> None: arc = Architecture([[qbs[i], qbs[i + 1]] for i in range(8)] + [[qbs[0], qbs[4]]]) testc = Circuit(4, 4).H(0).CX(0, 3).CX(1, 2).CX(0, 1).CX(3, 2).measure_all() - routed = route(testc, arc) + routed = testc.copy() + mm = MappingManager(arc) + mm.route_circuit(routed, [LexiRouteRoutingMethod()]) DelayMeasures().apply(routed) readout = routed.qubit_readout diff --git a/pytket/tests/predicates_test.py b/pytket/tests/predicates_test.py index d75bc34b3f..5baedf9856 100644 --- a/pytket/tests/predicates_test.py +++ b/pytket/tests/predicates_test.py @@ -214,7 +214,7 @@ def test_routing_and_placement_pass() -> None: assert seq_pass.apply(cu2) assert cu2.initial_map == expected_map - full_pass = FullMappingPass(arc, pl, config=[LexiRouteRoutingMethod(100)]) + full_pass = FullMappingPass(arc, pl, config=[LexiRouteRoutingMethod()]) cu3 = CompilationUnit(circ.copy()) assert full_pass.apply(cu3) assert cu3.initial_map == expected_map @@ -657,7 +657,7 @@ def sq(a: float, b: float, c: float) -> Circuit: [k.to_list(), v.to_list()] for k, v in qm.items() ] # FullMappingPass - fm_pass = FullMappingPass(arc, placer, config=[LexiRouteRoutingMethod(100)]) + fm_pass = FullMappingPass(arc, placer, config=[LexiRouteRoutingMethod()]) assert fm_pass.to_dict()["pass_class"] == "SequencePass" p_pass = fm_pass.get_sequence()[0] r_pass = fm_pass.get_sequence()[1] diff --git a/pytket/tests/routing_test.py b/pytket/tests/routing_test.py deleted file mode 100644 index 9c67d26ec3..0000000000 --- a/pytket/tests/routing_test.py +++ /dev/null @@ -1,865 +0,0 @@ -# Copyright 2019-2021 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pathlib import Path -from pytket.circuit import OpType, Qubit, Node, Circuit # type: ignore -from pytket.routing import ( # type: ignore - route, -) -from pytket.placement import LinePlacement, GraphPlacement, NoiseAwarePlacement, Placement, place_with_map # type: ignore -from pytket.architecture import Architecture, SquareGrid, FullyConnected # type: ignore -from pytket.mapping import LexiRouteRoutingMethod # type: ignore -from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore -from pytket.passes import ( # type: ignore - DefaultMappingPass, - FullMappingPass, - RoutingPass, - PlacementPass, - CXMappingPass, - AASRouting, - PauliSimp, - CNotSynthType, -) -from pytket.qasm import circuit_from_qasm -from pytket.transform import Transform # type: ignore -import numpy as np -import pytest # type: ignore - -import json - - -def test_architectures() -> None: - basic_index_coupling = [(0, 1), (2, 1), (2, 3), (4, 3)] - basic_index_architecture = Architecture(basic_index_coupling) - basic_index_coupling_convert = [ - (Node(0), Node(1)), - (Node(2), Node(1)), - (Node(2), Node(3)), - (Node(4), Node(3)), - ] - assert basic_index_architecture.coupling == basic_index_coupling_convert - - node_0 = Node("example_register", 0) - node_1 = Node("example_register", 1) - node_2 = Node("example_register", 2) - node_3 = Node("example_register", 3) - basic_uid_coupling = [(node_0, node_1), (node_1, node_2), (node_2, node_3)] - basic_uid_architecture = Architecture(basic_uid_coupling) - assert basic_uid_architecture.coupling == basic_uid_coupling - - square_arc = SquareGrid(2, 2, 2) - assert square_arc.nodes[0] == Node("gridNode", [0, 0, 0]) - assert square_arc.coupling[0] == ( - Node("gridNode", [0, 0, 0]), - Node("gridNode", [0, 1, 0]), - ) - - -def test_architecture_eq() -> None: - coupling = [(1, 2), (3, 4), (0, 6), (0, 3)] - arc = Architecture(coupling) - - assert arc != Architecture([]) - assert arc == Architecture(coupling) - assert arc == Architecture([(Node(i), Node(j)) for (i, j) in coupling]) - assert arc != Architecture([(Node("s", i), Node("s", j)) for (i, j) in coupling]) - - # only Node IDs and coupling matters - g00, g01, g10, g11 = [ - Node("gridNode", [i, j, 0]) for i in range(2) for j in range(2) - ] - sq_arc = Architecture([(g00, g01), (g01, g11), (g00, g10), (g10, g11)]) - assert sq_arc == SquareGrid(2, 2) - assert sq_arc != Architecture([(g00, g01), (g01, g11), (g00, g10)]) - - -def test_fully_connected() -> None: - fc = FullyConnected(3) - assert fc.nodes == [Node("fcNode", i) for i in range(3)] - d = fc.to_dict() - fc1 = FullyConnected.from_dict(d) - assert fc == fc1 - - -def test_arch_types() -> None: - arch = Architecture([(0, 1)]) - assert isinstance(arch, Architecture) - fc = FullyConnected(2) - assert isinstance(fc, FullyConnected) - sg = SquareGrid(2, 2, 2) - assert isinstance(sg, SquareGrid) - - -def test_placements() -> None: - test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] - test_architecture = Architecture(test_coupling) - circ = Circuit(6) - for pair in test_coupling: - circ.CX(pair[0], pair[1]) - circ_qbs = circ.qubits - base_pl = Placement(test_architecture) - line_pl = LinePlacement(test_architecture) - graph_pl = GraphPlacement(test_architecture) - base_placed = circ.copy() - line_placed = circ.copy() - graph_placed = circ.copy() - - base_map = base_pl.get_placement_map(circ) - line_map = line_pl.get_placement_map(circ) - graph_map = graph_pl.get_placement_map(circ) - - assert base_map != line_map - assert base_map != graph_map 
- assert circ.qubits == circ_qbs - - base_pl.place(base_placed) - line_pl.place(line_placed) - graph_pl.place(graph_placed) - - assert line_placed.qubits[0] == line_map[circ_qbs[0]] - assert line_placed.qubits[1] == line_map[circ_qbs[1]] - assert line_placed.qubits[2] == line_map[circ_qbs[2]] - - assert base_placed.qubits[0] == base_map[circ_qbs[0]] - assert base_placed.qubits[1] == base_map[circ_qbs[1]] - assert base_placed.qubits[2] == base_map[circ_qbs[2]] - - assert graph_placed.qubits[0] == graph_map[circ_qbs[0]] - assert graph_placed.qubits[1] == graph_map[circ_qbs[1]] - assert graph_placed.qubits[2] == graph_map[circ_qbs[2]] - - assert circ_qbs != base_placed.qubits - assert circ_qbs != line_placed.qubits - assert circ_qbs != graph_placed.qubits - - base_placed = route(base_placed, test_architecture) - line_placed = route(line_placed, test_architecture) - graph_placed = route(graph_placed, test_architecture) - - assert base_placed.valid_connectivity(test_architecture, False) - assert line_placed.valid_connectivity(test_architecture, False) - assert graph_placed.valid_connectivity(test_architecture, False) - - -def test_placements_serialization() -> None: - with open( - Path(__file__).resolve().parent / "json_test_files" / "placements.json", "r" - ) as f: - dict = json.load(f) - base_pl_serial = dict["base_placement"] - line_pl_serial = dict["line_placement"] - graph_pl_serial = dict["graph_placement"] - noise_pl_serial = dict["noise_placement"] - - assert Placement.from_dict(base_pl_serial).to_dict() == base_pl_serial - assert LinePlacement.from_dict(line_pl_serial).to_dict() == line_pl_serial - assert GraphPlacement.from_dict(graph_pl_serial).to_dict() == graph_pl_serial - assert NoiseAwarePlacement.from_dict(noise_pl_serial).to_dict() == noise_pl_serial - - -def test_placement_config() -> None: - test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] - test_architecture = Architecture(test_coupling) - test_pl = GraphPlacement(test_architecture) - test_circuit = Circuit(6) - test_circuit.CX(0, 1) - test_circuit.CX(2, 3) - test_circuit.CX(4, 3) - test_circuit.CX(2, 4) - test_circuit.CX(3, 5) - test_circuit.CX(0, 5) - circ1 = test_circuit.copy() - circ2 = test_circuit.copy() - map1 = test_pl.get_placement_map(test_circuit) - test_pl.place(circ1) - test_pl.modify_config( - max_matches=1, depth_limit=0, max_interaction_edges=2, timeout=100 - ) - map2 = test_pl.get_placement_map(test_circuit) - test_pl.place(circ2) - assert map1 != map2 - circ1 = route(circ1, test_architecture) - circ2 = route(circ2, test_architecture) - assert circ1.n_gates < circ2.n_gates - - -def test_convert_index_mapping() -> None: - test_circuit = Circuit(6) - test_circuit.CX(0, 1) - test_circuit.CX(2, 3) - test_circuit.CX(4, 3) - test_circuit.CX(2, 4) - test_circuit.CX(3, 5) - test_circuit.CX(0, 5) - - c0 = test_circuit.copy() - c1 = test_circuit.copy() - - index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} - uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} - circ_qbs = test_circuit.qubits - assert uid_map[circ_qbs[0]] == Node(1) - assert uid_map[circ_qbs[1]] == Node(2) - assert uid_map[circ_qbs[2]] == Node(0) - assert uid_map[circ_qbs[3]] == Node(4) - assert uid_map[circ_qbs[4]] == Node(3) - - place_with_map(test_circuit, uid_map) - - new_circ_qbs = test_circuit.qubits - assert circ_qbs != new_circ_qbs - assert new_circ_qbs[0] == Node(0) - assert new_circ_qbs[1] == Node(1) - assert new_circ_qbs[2] == Node(2) - assert new_circ_qbs[3] == Node(3) - assert new_circ_qbs[4] == Node(4) - assert new_circ_qbs[5] == 
Qubit("unplaced", 0) - - index_map_0 = {0: 5, 1: 4, 2: 0, 3: 1, 4: 3, 5: 2} - index_map_1 = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3, 5: 5} - uid_0 = {Qubit(i): Node(j) for i, j in index_map_0.items()} - uid_1 = {Qubit(i): Node(j) for i, j in index_map_1.items()} - assert uid_0 != uid_1 - - place_with_map(c0, uid_0) - place_with_map(c1, uid_1) - assert c0 != c1 - - -def test_basic_routing() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - init_map = dict() - init_map[Qubit(0)] = Node(0) - init_map[Qubit(1)] = Node(1) - init_map[Qubit(2)] = Node(2) - init_map[Qubit(3)] = Node(3) - init_map[Qubit(4)] = Node(4) - pl = Placement(arc) - pl.place_with_map(circ, init_map) - out_circ = route(circ, arc, swap_lookahead=50) - assert out_circ.valid_connectivity(arc, False) - assert len(out_circ.get_commands()) == 10 - - -def test_basic_routing_with_line_map() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - lp = LinePlacement(arc) - lp.place(circ) - out_circ = route(circ, arc) - assert out_circ.valid_connectivity(arc, False) - assert len(out_circ.get_commands()) == 6 - - -def test_basic_routing_with_noise_map() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - oq_fids = [ - [Node(0), 0.999], - [Node(1), 0.999], - [Node(2), 0.999], - [Node(3), 0.999], - [Node(4), 0.999], - ] - tq_fids = [ - [[Node(0), Node(1)], 0.9], - [[Node(1), Node(0)], 0.9], - [[Node(1), Node(2)], 0.89], - [[Node(2), Node(1)], 0.89], - [[Node(2), Node(3)], 0.7], - [[Node(3), Node(2)], 0.7], - [[Node(3), Node(4)], 0.59], - [[Node(4), Node(3)], 0.59], - ] - - tq_errs_dict = { - (Node(0), Node(1)): 0.1, - (Node(1), Node(0)): 0.1, - (Node(1), Node(2)): 0.11, - (Node(2), Node(1)): 0.11, - (Node(2), Node(3)): 0.3, - (Node(3), Node(2)): 0.3, - (Node(3), Node(4)): 0.41, - (Node(4), Node(3)): 0.41, - } - oq_errs_dict = {node: 1.0 - value for node, value in oq_fids} - - nap = NoiseAwarePlacement(arc, oq_errs_dict, tq_errs_dict) - nap.place(circ) - out_circ = route(circ, arc) - assert len(out_circ.get_commands()) == 6 - assert out_circ.valid_connectivity(arc, False) - - -def test_greedy_noise_route() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - oq_fids = [ - [Node(0), 0.999], - [Node(1), 0.999], - [Node(2), 0.999], - [Node(3), 0.999], - [Node(4), 0.999], - ] - - tq_errs_dict = { - (Node(0), Node(1)): 0.1, - (Node(1), Node(0)): 0.1, - (Node(1), Node(2)): 0.11, - (Node(2), Node(1)): 0.11, - (Node(2), Node(3)): 0.3, - (Node(3), Node(2)): 0.3, - (Node(3), Node(4)): 0.41, - (Node(4), Node(3)): 0.41, - } - oq_errs_dict = {node: 1.0 - value for node, value in oq_fids} - nap = NoiseAwarePlacement(arc, oq_errs_dict, tq_errs_dict) - nap.place(circ) - out_circ = route(circ, arc) - - assert len(out_circ.get_commands()) == 6 - assert out_circ.valid_connectivity(arc, False) - - -def test_decompose_swap_to_cx() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - init_map = dict() - init_map[Qubit(0)] = Node(0) - init_map[Qubit(1)] = Node(1) - init_map[Qubit(2)] = Node(2) - 
init_map[Qubit(3)] = Node(3) - init_map[Qubit(4)] = Node(4) - - pl = Placement(arc) - pl.place_with_map(circ, init_map) - - out_circ = route(circ, arc) - assert out_circ.valid_connectivity(arc, False) - Transform.DecomposeSWAPtoCX(arc).apply(out_circ) - assert len(out_circ.get_commands()) == 20 - Transform.DecomposeCXDirected(arc).apply(out_circ) - assert out_circ.valid_connectivity(arc, True) - assert len(out_circ.get_commands()) == 40 - - -def test_commuting_sq_through_swap() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.H(0) - circ.H(1) - circ.H(2) - circ.H(3) - circ.H(4) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - init_map = dict() - init_map[Qubit(0)] = Node(0) - init_map[Qubit(1)] = Node(1) - init_map[Qubit(2)] = Node(2) - init_map[Qubit(3)] = Node(3) - init_map[Qubit(4)] = Node(4) - - out_circ = route(circ, arc, initial_mapping=init_map) - assert out_circ.valid_connectivity(arc, False) - # oq_fidelities = [ - # [Node(0), OpType.H, 0.9], - # [Node(1), OpType.H, 0.3], - # [Node(2), OpType.H, 0.5], - # [Node(3), OpType.H, 0.67], - # [Node(4), OpType.H, 0.99999], - # ] - - # _commute_single_gates_through_swaps(out_circ,arc,oq_fidelities) TODO: UN COMMENT WHEN THE DEVICE CLASS IS EXPOSED!! - # Transform.CommuteSQThroughSWAP(devi).apply(out_circ) - - Transform.DecomposeSWAPtoCX(arc).apply(out_circ) - Transform.DecomposeCXDirected(arc).apply(out_circ) - assert out_circ.valid_connectivity(arc, True) - - -def test_noncontiguous_arc() -> None: - arc = Architecture([[0, 2]]) - pass1 = DefaultMappingPass(arc) - c = Circuit(2) - pass1.apply(c) - - -def test_noncontiguous_arc_phase_poly() -> None: - # testing non-contiguous ascending named nodes - arc = Architecture([[0, 2]]) - pass1 = AASRouting(arc, lookahead=1) - c = Circuit(2).H(0).H(1) - pass1.apply(c) - assert c.n_gates_of_type(OpType.H) == 2 - assert c.n_gates_of_type(OpType.CX) == 0 - assert c.n_gates_of_type(OpType.CX) == 0 - - -def test_RoutingPass() -> None: - arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - circ = Circuit(5) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(1, 3) - circ.CX(1, 2) - cu_0 = CompilationUnit(circ) - cu_1 = CompilationUnit(circ) - placer = GraphPlacement(arc) - p_pass = PlacementPass(placer) - r_pass_0 = RoutingPass(arc) - r_pass_1 = RoutingPass(arc) - p_pass.apply(cu_0) - p_pass.apply(cu_1) - r_pass_0.apply(cu_0) - r_pass_1.apply(cu_1) - out_circ_0 = cu_0.circuit - out_circ_1 = cu_1.circuit - assert out_circ_0.valid_connectivity(arc, False, True) - assert out_circ_1.valid_connectivity(arc, False, True) - - -def test_FullMappingPass() -> None: - arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - circ = Circuit(5) - circ.CX(0, 1).CX(0, 3).CX(2, 4).CX(1, 4).CX(0, 4).CX(2, 1).CX(3, 0) - cu_0 = CompilationUnit(circ) - cu_1 = CompilationUnit(circ) - gp_placer = GraphPlacement(arc) - lp_placer = LinePlacement(arc) - - m_pass_0 = FullMappingPass(arc, gp_placer, config=[LexiRouteRoutingMethod(1)]) - m_pass_1 = FullMappingPass(arc, lp_placer, config=[LexiRouteRoutingMethod(75)]) - m_pass_0.apply(cu_0) - m_pass_1.apply(cu_1) - out_circ_0 = cu_0.circuit - out_circ_1 = cu_1.circuit - assert out_circ_0.valid_connectivity(arc, False, True) - assert out_circ_1.valid_connectivity(arc, False, True) - - -def test_AAS() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - 
assert pass1.apply(circ) - - -def test_AAS_2() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc) - assert pass1.apply(circ) - - -def test_AAS_3() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_4() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc) - assert pass1.apply(circ) - - -def test_AAS_5() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_6() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc) - assert pass1.apply(circ) - - -def test_AAS_7() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_8() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.CX(0, 1) - circ.H(0) - circ.Z(1) - circ.CX(0, 3) - circ.Rx(1.5, 3) - circ.CX(2, 4) - circ.X(2) - circ.CX(1, 4) - circ.CX(0, 4) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_9() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8]]) - circ = Circuit(9) - circ.CX(0, 8).CX(8, 1).CX(1, 7).CX(7, 2).CX(2, 6).CX(6, 3).CX(3, 5).CX(5, 4) - circ.Rz(0.5, 4) - pass1 = AASRouting(arc, lookahead=2) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() < 56 - - -def test_AAS_10() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, lookahead=2) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() < 33 - - -def test_AAS_11() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.SWAP) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 119 - - -def test_AAS_12() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.HamPath) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 36 - - -def test_AAS_13() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 
6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 28 - - -def test_AAS_14() -> None: - arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) - circ = Circuit(3).CZ(0, 1) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 3 - - -def test_AAS_15() -> None: - arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) - circ = Circuit(2).CZ(0, 1) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 3 - - -def test_CXMappingPass() -> None: - arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - circ = Circuit(5) - circ.Y(4).CX(0, 1).S(3).CX(0, 3).H(0).CX(2, 4).CX(1, 4).Y(1).CX(0, 4).CX(2, 1).Z( - 2 - ).CX(3, 0).CX(2, 0).CX(1, 3) - circ.measure_all() - cu_0 = CompilationUnit(circ) - cu_1 = CompilationUnit(circ) - gp_placer = GraphPlacement(arc) - lp_placer = LinePlacement(arc) - m_pass_0 = CXMappingPass( - arc, - gp_placer, - config=[LexiRouteRoutingMethod(20)], - bridge_interactions=10, - directed_cx=True, - ) - m_pass_1 = CXMappingPass(arc, lp_placer, delay_measures=False) - m_pass_0.apply(cu_0) - m_pass_1.apply(cu_1) - out_circ_0 = cu_0.circuit - out_circ_1 = cu_1.circuit - - measure_pred = NoMidMeasurePredicate() - assert measure_pred.verify(cu_0.circuit) == True - assert measure_pred.verify(cu_1.circuit) == False - assert out_circ_0.valid_connectivity(arc, True) - assert out_circ_1.valid_connectivity(arc, False) - - -def test_CXMappingPass_correctness() -> None: - # TKET-1045 - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - placer = NoiseAwarePlacement(arc) - p = CXMappingPass(arc, placer, directed_cx=True, delay_measures=True) - c = Circuit(3).CX(0, 1).CX(1, 2).CCX(2, 1, 0).CY(1, 0).CY(2, 1) - cu = CompilationUnit(c) - p.apply(cu) - c1 = cu.circuit - u1 = c1.get_unitary() - assert all(np.isclose(abs(x), 0) or np.isclose(abs(x), 1) for x in u1.flatten()) - - -def test_place_with_map_twice() -> None: - # TKET-671 - c = Circuit(6).CX(0, 1).CX(2, 3).CX(4, 3).CX(2, 4).CX(3, 5).CX(0, 5) - - index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} - uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} - c_qbs = c.qubits - assert uid_map[c_qbs[0]] == Node(1) - assert uid_map[c_qbs[1]] == Node(2) - assert uid_map[c_qbs[2]] == Node(0) - assert uid_map[c_qbs[3]] == Node(4) - assert uid_map[c_qbs[4]] == Node(3) - - assert all(qb.reg_name == "q" for qb in c.qubits) - place_with_map(c, uid_map) - assert all(qb.reg_name in ["node", "unplaced"] for qb in c.qubits) - place_with_map(c, uid_map) - assert all(qb.reg_name == "unplaced" for qb in c.qubits) - - -def test_big_placement() -> None: - # TKET-1275 - c = circuit_from_qasm( - Path(__file__).resolve().parent / "qasm_test_files" / "test14.qasm" - ) - arc = Architecture( - [ - [0, 1], - [0, 14], - [1, 0], - [1, 2], - [1, 13], - [2, 1], - [2, 3], - [2, 12], - [3, 2], - [3, 4], - [3, 11], - [4, 3], - [4, 5], - [4, 10], - [5, 4], - [5, 6], - [5, 9], - [6, 5], - [6, 8], - [7, 8], - [8, 6], - [8, 7], - [8, 9], - [9, 5], - [9, 
8], - [9, 10], - [10, 4], - [10, 9], - [10, 11], - [11, 3], - [11, 10], - [11, 12], - [12, 2], - [12, 11], - [12, 13], - [13, 1], - [13, 12], - [13, 14], - [14, 0], - [14, 13], - ] - ) - assert PauliSimp().apply(c) - assert DefaultMappingPass(arc).apply(c) - - -def test_CXMappingPass_terminates() -> None: - # TKET-1376 - c = circuit_from_qasm( - Path(__file__).resolve().parent / "qasm_test_files" / "test13.qasm" - ) - arc = Architecture( - [ - [0, 1], - [1, 0], - [1, 2], - [1, 4], - [2, 1], - [2, 3], - [3, 2], - [3, 5], - [4, 1], - [4, 7], - [5, 3], - [5, 8], - [6, 7], - [7, 4], - [7, 6], - [7, 10], - [8, 5], - [8, 9], - [8, 11], - [9, 8], - [10, 7], - [10, 12], - [11, 8], - [11, 14], - [12, 10], - [12, 13], - [12, 15], - [13, 12], - [13, 14], - [14, 11], - [14, 13], - [14, 16], - [15, 12], - [15, 18], - [16, 14], - [16, 19], - [17, 18], - [18, 15], - [18, 17], - [18, 21], - [19, 16], - [19, 20], - [19, 22], - [20, 19], - [21, 18], - [21, 23], - [22, 19], - [22, 25], - [23, 21], - [23, 24], - [24, 23], - [24, 25], - [25, 22], - [25, 24], - [25, 26], - [26, 25], - ] - ) - placer = NoiseAwarePlacement(arc) - placer.modify_config(timeout=10000) - p = CXMappingPass(arc, placer, directed_cx=False, delay_measures=False) - assert p.apply(c) - - -if __name__ == "__main__": - test_architectures() - test_placements() - test_placement_config() - test_convert_index_mapping() - test_basic_routing() - test_basic_routing_with_line_map() - test_commuting_sq_through_swap() - test_decompose_swap_to_cx() - test_greedy_noise_route() - test_basic_routing_with_noise_map() - test_noncontiguous_arc() - test_noncontiguous_arc_phase_poly() - test_RoutingPass() - test_FullMappingPass() - test_CXMappingPass() - test_place_with_map_twice() diff --git a/schemas/compiler_pass_v1.json b/schemas/compiler_pass_v1.json index 38fe5c57f0..57356912c4 100644 --- a/schemas/compiler_pass_v1.json +++ b/schemas/compiler_pass_v1.json @@ -183,9 +183,6 @@ "architecture": { "$ref": "#/definitions/architecture" }, - "routing_config": { - "$ref": "#/definitions/routing_config" - }, "directed": { "type": "boolean", "description": "Whether to consider directedness of the architecture for CXs in \"DecomposeSwapsToCXs\"." 
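The removed tests above exercise the placement-then-routing flow through the pass-based API (a PlacementPass followed by a RoutingPass applied to a CompilationUnit). A minimal Python sketch of that flow, assuming the pytket imports used in the removed test module (Architecture, Circuit, CompilationUnit, GraphPlacement, PlacementPass, RoutingPass):

    # Sketch only: mirrors the removed test_RoutingPass test; all names are taken
    # from the test module above and are assumed to be importable from pytket.
    arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]])
    circ = Circuit(5).CX(0, 1).CX(0, 3).CX(2, 4).CX(1, 4).CX(1, 3).CX(1, 2)
    cu = CompilationUnit(circ)
    PlacementPass(GraphPlacement(arc)).apply(cu)  # choose an initial qubit-to-node map
    RoutingPass(arc).apply(cu)  # insert SWAPs so 2-qubit gates act on adjacent nodes
    assert cu.circuit.valid_connectivity(arc, False, True)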
diff --git a/tket/src/ArchAwareSynth/Path.hpp b/tket/src/ArchAwareSynth/Path.hpp index 861b45ab45..dd9b84c702 100644 --- a/tket/src/ArchAwareSynth/Path.hpp +++ b/tket/src/ArchAwareSynth/Path.hpp @@ -14,7 +14,7 @@ #pragma once #include "Architecture/Architecture.hpp" -#include "Routing/Placement.hpp" +#include "Placement/Placement.hpp" #include "Utils/MatrixAnalysis.hpp" #include "Utils/UnitID.hpp" namespace tket { diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index ba67512b2c..a1a7a1bf2e 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -73,7 +73,7 @@ set(TKET_OPTYPE_DIR ${TKET_SRC_DIR}/OpType) set(TKET_OPS_DIR ${TKET_SRC_DIR}/Ops) set(TKET_GATE_DIR ${TKET_SRC_DIR}/Gate) set(TKET_SIMULATION_DIR ${TKET_SRC_DIR}/Simulation) -set(TKET_ROUTING_DIR ${TKET_SRC_DIR}/Routing) +set(TKET_PLACEMENT_DIR ${TKET_SRC_DIR}/Placement) set(TKET_MAPPING_DIR ${TKET_SRC_DIR}/Mapping) set(TKET_TOKEN_SWAPPING_DIR ${TKET_SRC_DIR}/TokenSwapping) set(TKET_TRANSFORM_DIR ${TKET_SRC_DIR}/Transformations) @@ -213,17 +213,11 @@ set(TKET_SOURCES ${TKET_TRANSFORM_DIR}/ContextualReduction.cpp ${TKET_TRANSFORM_DIR}/ThreeQubitSquash.cpp - # Routing - ${TKET_ROUTING_DIR}/PlacementGraphClasses.cpp - ${TKET_ROUTING_DIR}/Qubit_Placement.cpp - ${TKET_ROUTING_DIR}/Swap_Analysis.cpp - ${TKET_ROUTING_DIR}/Board_Analysis.cpp - ${TKET_ROUTING_DIR}/Routing.cpp - ${TKET_ROUTING_DIR}/Slice_Manipulation.cpp - ${TKET_ROUTING_DIR}/subgraph_mapping.cpp - ${TKET_ROUTING_DIR}/Placement.cpp - ${TKET_ROUTING_DIR}/Verification.cpp - + # Placement + ${TKET_PLACEMENT_DIR}/PlacementGraphClasses.cpp + ${TKET_PLACEMENT_DIR}/Qubit_Placement.cpp + ${TKET_PLACEMENT_DIR}/subgraph_mapping.cpp + ${TKET_PLACEMENT_DIR}/Placement.cpp # Architecture ${TKET_ARCHITECTURE_DIR}/ArchitectureGraphClasses.cpp ${TKET_ARCHITECTURE_DIR}/Architecture.cpp @@ -236,6 +230,7 @@ set(TKET_SOURCES ${TKET_MAPPING_DIR}/LexicographicalComparison.cpp ${TKET_MAPPING_DIR}/LexiRoute.cpp ${TKET_MAPPING_DIR}/RoutingMethodJson.cpp + ${TKET_MAPPING_DIR}/Verification.cpp # Architecture Aware Synthesis @@ -252,6 +247,7 @@ set(TKET_SOURCES ${TKET_UTILS_DIR}/CosSinDecomposition.cpp ${TKET_UTILS_DIR}/Expression.cpp + # Predicates ${TKET_PREDS_DIR}/Predicates.cpp ${TKET_PREDS_DIR}/CompilationUnit.cpp diff --git a/tket/src/Routing/Verification.cpp b/tket/src/Mapping/Verification.cpp similarity index 100% rename from tket/src/Routing/Verification.cpp rename to tket/src/Mapping/Verification.cpp diff --git a/tket/src/Routing/Verification.hpp b/tket/src/Mapping/Verification.hpp similarity index 100% rename from tket/src/Routing/Verification.hpp rename to tket/src/Mapping/Verification.hpp diff --git a/tket/src/Routing/Placement.cpp b/tket/src/Placement/Placement.cpp similarity index 100% rename from tket/src/Routing/Placement.cpp rename to tket/src/Placement/Placement.cpp diff --git a/tket/src/Routing/Placement.hpp b/tket/src/Placement/Placement.hpp similarity index 93% rename from tket/src/Routing/Placement.hpp rename to tket/src/Placement/Placement.hpp index 8735f61e6d..c8f36cf2a8 100644 --- a/tket/src/Routing/Placement.hpp +++ b/tket/src/Placement/Placement.hpp @@ -104,6 +104,28 @@ struct PlacementConfig { JSON_DECL(PlacementConfig) +// stores and tracks the points of the circuit up to which has been solved +struct PlacementFrontier { + // set of 2qb vertices which need to be solved for + std::shared_ptr slice; + // Quantum Edges coming in to vertices in slice, indexed by qubit + std::shared_ptr quantum_in_edges; + // Quantum Edges leaving vertices in slice, 
indexed by qubit + std::shared_ptr quantum_out_edges; + // Boolean edges coming in to vertices in slice. Guarantees that all edges + // into every vertex in slice is represented in next_cut + std::shared_ptr classical_in_edges; + + // reference to circuit that it acts on + const Circuit& circ; + + explicit PlacementFrontier(const Circuit& _circ); + // initialise at front of circuit + void init(); + // move to next slice + void next_slicefrontier(); +}; + // Class for storing interaction graph. // Interacting qubits have an edge between them. class QubitGraph : public graphs::DirectedGraph { diff --git a/tket/src/Routing/PlacementGraphClasses.cpp b/tket/src/Placement/PlacementGraphClasses.cpp similarity index 100% rename from tket/src/Routing/PlacementGraphClasses.cpp rename to tket/src/Placement/PlacementGraphClasses.cpp diff --git a/tket/src/Routing/Qubit_Placement.cpp b/tket/src/Placement/Qubit_Placement.cpp similarity index 88% rename from tket/src/Routing/Qubit_Placement.cpp rename to tket/src/Placement/Qubit_Placement.cpp index 6409e62764..4b85415d86 100644 --- a/tket/src/Routing/Qubit_Placement.cpp +++ b/tket/src/Placement/Qubit_Placement.cpp @@ -21,7 +21,6 @@ #include "Architecture/Architecture.hpp" #include "Graphs/Utils.hpp" #include "Placement.hpp" -#include "Routing.hpp" namespace tket { @@ -38,13 +37,56 @@ std::set interacting_qbs(const Circuit& circ) { return qbs; } +PlacementFrontier::PlacementFrontier(const Circuit& _circ) : circ(_circ) { + init(); +} +void PlacementFrontier::init() { + VertexVec input_slice; + quantum_in_edges = std::make_shared(); + classical_in_edges = std::make_shared(); + + for (const Qubit& qb : circ.all_qubits()) { + Vertex input = circ.get_in(qb); + input_slice.push_back(input); + Edge candidate = circ.get_nth_out_edge(input, 0); + quantum_in_edges->insert({qb, circ.skip_irrelevant_edges(candidate)}); + } + for (const Bit& bit : circ.all_bits()) { + Vertex input = circ.get_in(bit); + EdgeVec candidates = circ.get_nth_b_out_bundle(input, 0); + classical_in_edges->insert({bit, candidates}); + } + + CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); + slice = next_cut.slice; + quantum_out_edges = next_cut.u_frontier; +} + +void PlacementFrontier::next_slicefrontier() { + quantum_in_edges = std::make_shared(); + classical_in_edges = std::make_shared(); + for (const std::pair& pair : quantum_out_edges->get()) { + Edge new_e = circ.skip_irrelevant_edges(pair.second); + quantum_in_edges->insert({pair.first, new_e}); + Vertex targ = circ.target(new_e); + EdgeVec targ_classical_ins = + circ.get_in_edges_of_type(targ, EdgeType::Boolean); + classical_in_edges->insert( + {Bit("frontier_bit", pair.first.index()), targ_classical_ins}); + } + + CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); + slice = next_cut.slice; + quantum_out_edges = next_cut.u_frontier; +} + QubitGraph monomorph_interaction_graph( const Circuit& circ, const unsigned max_edges, unsigned depth_limit) { std::set qubits_considered = interacting_qbs(circ); QubitGraph q_graph(circ.all_qubits()); - RoutingFrontier current_sf(circ); + PlacementFrontier current_sf(circ); unsigned count_edges = 0; for (unsigned slice = 0; slice < depth_limit && count_edges < max_edges && @@ -77,7 +119,7 @@ QubitGraph generate_interaction_graph( const Circuit& circ, unsigned depth_limit) { std::set qubits_considered = interacting_qbs(circ); QubitGraph q_graph(circ.all_qubits()); - RoutingFrontier current_sf(circ); + PlacementFrontier current_sf(circ); for (unsigned 
slice = 0; slice < depth_limit && !current_sf.slice->empty() && qubits_considered.size() > 1; diff --git a/tket/src/Routing/subgraph_mapping.cpp b/tket/src/Placement/subgraph_mapping.cpp similarity index 99% rename from tket/src/Routing/subgraph_mapping.cpp rename to tket/src/Placement/subgraph_mapping.cpp index b41090fd1a..dcd57b338c 100644 --- a/tket/src/Routing/subgraph_mapping.cpp +++ b/tket/src/Placement/subgraph_mapping.cpp @@ -19,7 +19,7 @@ #include "Architecture/Architecture.hpp" #include "Graphs/Utils.hpp" #include "Placement.hpp" -#include "Routing/Placement.hpp" +#include "Placement/Placement.hpp" #include "Utils/Assert.hpp" #include "Utils/GraphHeaders.hpp" #include "Utils/TketLog.hpp" diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index 4f0c50477d..2155d049a7 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -20,11 +20,11 @@ #include "Converters/PhasePoly.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/RoutingMethod.hpp" +#include "Placement/Placement.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassLibrary.hpp" #include "Predicates/Predicates.hpp" -#include "Routing/Placement.hpp" #include "Transformations/Transform.hpp" #include "Utils/Json.hpp" diff --git a/tket/src/Predicates/Predicates.cpp b/tket/src/Predicates/Predicates.cpp index 7fd631e8b8..b7f1f806fe 100644 --- a/tket/src/Predicates/Predicates.cpp +++ b/tket/src/Predicates/Predicates.cpp @@ -15,7 +15,8 @@ #include "Predicates.hpp" #include "Gate/Gate.hpp" -#include "Routing/Verification.hpp" +#include "Mapping/Verification.hpp" +#include "Placement/Placement.hpp" namespace tket { diff --git a/tket/src/Predicates/Predicates.hpp b/tket/src/Predicates/Predicates.hpp index 3d9afa04fc..579a3682cd 100644 --- a/tket/src/Predicates/Predicates.hpp +++ b/tket/src/Predicates/Predicates.hpp @@ -15,7 +15,6 @@ #pragma once #include -#include "Routing/Routing.hpp" #include "Transformations/Transform.hpp" namespace tket { diff --git a/tket/src/Routing/Board_Analysis.cpp b/tket/src/Routing/Board_Analysis.cpp deleted file mode 100644 index 54b9d2fa22..0000000000 --- a/tket/src/Routing/Board_Analysis.cpp +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Routing.hpp" - -namespace tket { - -bool node_active(const qubit_bimap_t& map, Node node) { - const bool found = map.right.find(node) != map.right.end(); - return found; -} - -Node Routing::find_best_inactive_node( - const Node& target_node, const Architecture& arc) const { - const unsigned diameter = arc.get_diameter(); - for (unsigned k = 1; k <= diameter; k++) { - std::vector potential_nodes = arc.nodes_at_distance(target_node, k); - for (Node potential : potential_nodes) { - if (!node_active(qmap, potential)) { - return potential; - } - } - } - throw ArchitectureFull(); // gotta hope you never get here... 
-} - -void Routing::activate_node(const Node& node) { - current_arc_.add_node(node); - for (Node neigh : original_arc_.get_neighbour_nodes(node)) { - if (node_active(qmap, neigh)) { - if (original_arc_.edge_exists(node, neigh)) { - current_arc_.add_connection(node, neigh); - } - if (original_arc_.edge_exists(neigh, node)) { - current_arc_.add_connection(neigh, node); - } - } - } -} - -void Routing::reactivate_qubit(const Qubit& qb, const Qubit& target) { - // finds 'best' available node - Node node = find_best_inactive_node(qmap.left.at(target), original_arc_); - - // updates qmap and initial maps to reflect this qb being at that node - activate_node(node); - std::pair new_in = {qb, node}; - qmap.left.insert(new_in); - init_map.left.insert(new_in); -} - -} // namespace tket diff --git a/tket/src/Routing/Routing.cpp b/tket/src/Routing/Routing.cpp deleted file mode 100644 index a12ca16207..0000000000 --- a/tket/src/Routing/Routing.cpp +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Routing.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "Utils/HelperFunctions.hpp" -#include "Utils/Json.hpp" - -namespace tket { - -bool RoutingConfig::operator==(const RoutingConfig& other) const { - return (this->depth_limit == other.depth_limit) && - (this->distrib_limit == other.distrib_limit) && - (this->interactions_limit == other.interactions_limit) && - (this->distrib_exponent == other.distrib_exponent); -} - -// If unit map is same pre and both routing, then the same placement procedure -// has happened in both cases, and routing is deterministic (!!) 
so same -// SWAP/Bridges added assuming same config -bool Routing::circuit_modified() const { - if (route_stats.swap_count > 0) return true; - if (route_stats.bridge_count > 0) return true; - if (circ_.boundary != original_boundary) return true; - return false; -} - -/* Class Constructor */ -Routing::Routing(const Circuit& _circ, const Architecture& _arc) - : circ_(_circ), slice_frontier_(circ_), original_arc_(_arc) { - circ_.unit_bimaps_ = _circ.unit_bimaps_; - original_boundary = circ_.boundary; - - current_arc_ = original_arc_; - // Checks for circuit and architecture compatibility - if (circ_.n_qubits() > current_arc_.n_nodes() || current_arc_.n_nodes() < 1) { - throw ArchitectureMismatch(circ_.n_qubits(), current_arc_.n_nodes()); - } - - // Information for placement & running routing with subgraph of architecture - // Initial nodes number - - // Track which nodes are actually active - for (const UnitID& uid : current_arc_.nodes()) { - Node n(uid); - interaction.insert({n, n}); - } -} - -void to_json(nlohmann::json& j, const RoutingConfig& config) { - j["depth_limit"] = config.depth_limit; - j["distrib_limit"] = config.distrib_limit; - j["interactions_limit"] = config.interactions_limit; - j["distrib_exponent"] = config.distrib_exponent; -} - -void from_json(const nlohmann::json& j, RoutingConfig& config) { - config.depth_limit = j.at("depth_limit").get(); - config.distrib_limit = j.at("distrib_limit").get(); - config.interactions_limit = j.at("interactions_limit").get(); - config.distrib_exponent = j.at("distrib_exponent").get(); -} - -std::vector Routing::get_active_nodes() const { - node_vector_t ret; - ret.reserve(qmap.size()); - for (auto [qb, n] : qmap.left) { - ret.push_back(n); - } - return ret; -} - -qubit_mapping_t Routing::return_final_map() const { - return bimap_to_map(final_map.left); -} - -qubit_mapping_t Routing::return_initial_map() const { - return bimap_to_map(init_map.left); -} - -bool subgraph_remove_if_connected( - Architecture& arc, const Architecture& subarc, const Node& node) { - // do not remove if node is in subarc - if (subarc.node_exists(node)) { - return false; - } - if (subarc.n_nodes() > 0) { - node_set_t ap = arc.get_articulation_points(subarc); - - if (ap.find(node) != ap.end()) { - return false; - } - } - - arc.remove_node(node); - return true; -} - -void remove_unmapped_nodes( - Architecture& arc, qubit_bimap_t& map, Circuit& circ) { - std::vector unmapped_nodes; - std::vector mapped_nodes; - - r_const_iterator_t iend = map.right.end(); - for (const UnitID& uid : arc.nodes()) { - Node n(uid); - r_const_iterator_t find_node = map.right.find(n); - if (find_node == iend) { - unmapped_nodes.push_back(n); - } else { - mapped_nodes.push_back(n); - } - } - Architecture subarc = arc.create_subarch(mapped_nodes); - - // sort mapped nodes from least connected to most (remove least connected - // first) - std::sort( - unmapped_nodes.begin(), unmapped_nodes.end(), [&arc](Node x, Node y) { - return (arc.get_out_degree(x) < arc.get_out_degree(y)); - }); - - qubit_vector_t available; - for (const Qubit& q : circ.all_qubits()) { - if (map.left.find(q) == map.left.end()) { - available.push_back(q); - } - } - - for (const Node& node : unmapped_nodes) { - if (!subgraph_remove_if_connected(arc, subarc, node)) { - // if node can't be removed, map to first unmapped qubit - if (available.empty()) - throw CircuitInvalidity( - "Routing is unable to construct connected placement from partial " - "placement using unplaced logical qubits. 
Please update the " - "circuit placement to a set of connected physical qubits."); - map.insert({available.front(), node}); - available.erase(available.begin()); - } - } -} - -qubit_mapping_t get_qmap_from_circuit(Architecture& arc, Circuit& circ) { - qubit_vector_t all_qbs = circ.all_qubits(); - node_set_t all_nodes = arc.nodes(); - - qubit_mapping_t qubit_map; - for (Qubit q : all_qbs) { - Node n(q); - if (all_nodes.find(n) != all_nodes.end()) { - qubit_map.insert({q, n}); - } - } - return qubit_map; -} - -std::pair Routing::solve(const RoutingConfig& config) { - config_ = config; - qubit_mapping_t qubit_map = get_qmap_from_circuit(current_arc_, circ_); - slice_frontier_.init(); - if (slice_frontier_.slice->empty()) { - organise_registers_and_maps(); - } else { - // Some nodes are permanently unused due to difference in architecture nodes - // and number of used wires in circuit To account for this, place highest - // numbered wires (i.e. unused) into set bad nodes of architecture - - // Placement method attempts to find a good initial allocation of qubits to - // nodes, aiming to reduce overall circuit depth. The method aims to put - // intreacting qubits in the first few circuit timesteps on adjacent nodes - // If no placement, qubits placed sequentially on nodes i.e. qubit 0 -> node - // 0 etc. - - if (qubit_map.size() != 0) { - init_map.left.insert(qubit_map.begin(), qubit_map.end()); - } - remove_unmapped_nodes(current_arc_, init_map, circ_); - final_map = remap(init_map); - organise_registers_and_maps(); - } - bool modified = circuit_modified(); - return {circ_, modified}; -} - -// Tidying up of qregisters and initial and final maps after SWAP adding. -void Routing::organise_registers_and_maps() { - // Given all the new empty wires with no home, if a qubit isnt in the initial - // map, find it an unassigned node and shove it there. - auto all_nodes = original_arc_.get_all_nodes_vec(); - unsigned next_ind = 0; - Node next_node = all_nodes[next_ind]; - - for (const Qubit& qb : circ_.all_qubits()) { - if (init_map.left.find(qb) == init_map.left.end()) { - // find next free node - while (init_map.right.count(next_node)) { - next_node = all_nodes[++next_ind]; - if (next_ind == all_nodes.size()) { - throw ArchitectureMismatch(circ_.n_qubits(), current_arc_.n_nodes()); - } - } - init_map.left.insert({qb, next_node}); - final_map.left.insert({qb, next_node}); - } - } - - // Due to the addition of SWAP gates, a qubit path may change, and so it's - // output boundary ordering may not match the input boundary ordering. The - // following updates the output boundary to match the ordering of the final - // slice frontier Make the input boundary match up to node numbering of - // architecture. 
- boundary_t new_boundary; - qubit_mapping_t reorder_map = bimap_to_map(init_map.left); - for (const std::pair& map : reorder_map) { - Qubit target = final_map.right.at(map.second); - new_boundary.insert( - {map.second, circ_.get_in(map.first), circ_.get_out(target)}); - // Which makes it all nicer - } - // add classical bits to new_boundary - for (auto [it, end] = - circ_.boundary.get().equal_range(UnitType::Bit); - it != end; it++) { - new_boundary.insert(*it); - } - - circ_.boundary = new_boundary; - circ_.update_initial_map(reorder_map); - circ_.update_final_map(bimap_to_map(final_map.left)); -} - -// Remap completes the routing algorithm -// slices passed as copy as 3 pass placement needs original preserved -qubit_bimap_t Routing::remap(const qubit_bimap_t& init) { - qmap = init; - - advance_frontier(); - // The routing algorithm: - // 1) Slices of circuit are parallelised/packed/whatever into 'timesteps' - // 2) Swaps are 'proposed' on edges connected to any nodes housing an - // 'interacting' qubit (interacting -> qubit is in some two qubit interaction - // in timestep 0) 3) A distance heuristic is used to determine whether the - // swap proposed will bring interacting qubits closer 4) If a swap is bring - // interacting qubits together it is compared to a held 'best swap'. The - // comparison is achieved by applying the same distance heuristic over future - // timesteps, until one is deemed strictly better. 5) If a succesful swap is - // found (from 3)), the swap gate is added to the circuit, information on - // which nodes home which qubits is updated and 1)->4) is repeated. 6) If no - // succesful swap is found, Dijkstra's algorithm is used to find a path in - // the graph between two interacting qubits, which the qubits are then swapped - // along. - // ... The pair of interacting qubits in the first timestep with greatest path - // distance between them is chosen. Algorithm then repeats 1)->4). - // for(unsigned count=0;slice_frontier_.slice.size()!=0 && count<2;count++){ - while (!slice_frontier_.slice->empty()) { - SwapResults single_swap = try_all_swaps(current_arc_.get_all_edges_vec()); - if (single_swap.success) { - route_stats.n_try_all_swaps++; - perform_action(single_swap.swap); - } else { - route_stats.n_solve_furthest++; - if (!solve_furthest()) { - throw RoutingFailure(); - } - } - advance_frontier(); - } - - qubit_bimap_t final_qmap; - for (l_const_iterator_t it = qmap.left.begin(); it != qmap.left.end(); ++it) { - Edge e = slice_frontier_.quantum_out_edges->get() - .find(it->first) - ->second; - Vertex v = circ_.target(e); - while (!circ_.detect_final_Op(v)) { - e = circ_.get_next_edge(v, e); - v = circ_.target(e); - } - Qubit out_q(circ_.get_id_from_out(v)); - final_qmap.insert({out_q, it->second}); - } - - return final_qmap; -} - -} // namespace tket diff --git a/tket/src/Routing/Routing.hpp b/tket/src/Routing/Routing.hpp deleted file mode 100644 index 748deabb4d..0000000000 --- a/tket/src/Routing/Routing.hpp +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include - -#include "Architecture/Architecture.hpp" -#include "Circuit/Circuit.hpp" -#include "Placement.hpp" -#include "Utils/BiMapHeaders.hpp" -#include "Utils/Json.hpp" - -namespace tket { - -// 2 (adjacent) nodes proposed to have their concurrent qubit states swapped -typedef std::pair Swap; -// node i is interacting with element (j) at i, if i==j not interacting -typedef std::map Interactions; -typedef std::vector qubit_map_vector_t; -typedef std::pair, std::pair> - distributed_cx_info; -// TODO remove -// qubit_map_vector_t map2vec(qubit_bimap_t map, unsigned total); -struct SwapResults { // results of try_all_swaps algorithm - bool success; - Swap swap; -}; - -/* Error Handling for Routing Circuits */ -class ArchitectureMismatch : public std::logic_error { - public: - ArchitectureMismatch(unsigned circ_no, unsigned arch_no) - : std::logic_error( - std::to_string(circ_no) + " " + std::to_string(arch_no)) { - tket_log()->error( - "Incorrect number of nodes in the architecture. " - "Qubits in circuit: {}, nodes in architecture: {}", - circ_no, arch_no); - } -}; - -class QMapRange : public std::logic_error { - public: - explicit QMapRange(const std::string &message) : std::logic_error(message) {} -}; - -class NodesRange : public std::logic_error { - public: - NodesRange(int nodes, int qubit) - : std::logic_error(std::to_string(nodes) + " " + std::to_string(qubit)) { - tket_log()->error( - "Qubit indexing larger than number of available qubits." - "Available Qubits: {}, Qubit Index: {}", - nodes, qubit); - } -}; - -class ArchitectureFull : public std::logic_error { - public: - ArchitectureFull() - : std::logic_error( - "No suitable node found in findBestNode => all nodes already " - "used") {} -}; - -class NodeAlreadyActive : public std::logic_error { - public: - explicit NodeAlreadyActive(int node) - : std::logic_error(std::to_string(node)) { - tket_log()->error("Node {} already active.", node); - } -}; - -class NodeInactive : public std::logic_error { - public: - explicit NodeInactive(int node) : std::logic_error(std::to_string(node)) { - tket_log()->error("Node {} inactive.", node); - } -}; - -class RoutingFailure : public std::logic_error { - public: - RoutingFailure() - : std::logic_error( - "Routing failed to complete. Note: Check your architecture " - "is connected.") {} -}; - -class BridgeInvalid : public std::logic_error { - public: - explicit BridgeInvalid(const std::string &message) - : std::logic_error(message) {} -}; - -class BridgePathIncorrect : public std::logic_error { - public: - explicit BridgePathIncorrect(int path_size) - : std::logic_error(std::to_string(path_size)) { - tket_log()->error("Path found has size {} which is invalid.", path_size); - } -}; - -// structure of configuration parameters for routing -struct RoutingConfig { - // circuit look ahead limit for SWAP picking - unsigned depth_limit; - // circuit look ahead limit for Distributed CX gate checking - unsigned distrib_limit; - // number of interactions considered in Distributed CX gate checking - unsigned interactions_limit; - // Whether to use a Distributed CX gate instead of a SWAP and a CX is - // determined by comparing the distance between some interacting pairs of - // qubits with and without the permutation. Changing distrib_exponent changes - // how much later interactions are considered. 
distrib_exponent < 0 => less - // effect from later interactions, distrib_exponent > 0 => greater effect, - // distrib_exponent = 0 => no effect - double distrib_exponent; - // Constructors - RoutingConfig( - unsigned _depth_limit, unsigned _distrib_limit, - unsigned _interactions_limit, const double &_distrib_exponent) - : depth_limit(_depth_limit), - distrib_limit(_distrib_limit), - interactions_limit(_interactions_limit), - distrib_exponent(_distrib_exponent) {} - - RoutingConfig() : RoutingConfig(50, 75, 10, 0) {} - - bool operator==(const RoutingConfig &other) const; -}; - -JSON_DECL(RoutingConfig) - -// stores and tracks the points of the circuit up to which has been solved -struct RoutingFrontier { - // set of 2qb vertices which need to be solved for - std::shared_ptr slice; - // Quantum Edges coming in to vertices in slice, indexed by qubit - std::shared_ptr quantum_in_edges; - // Quantum Edges leaving vertices in slice, indexed by qubit - std::shared_ptr quantum_out_edges; - // Boolean edges coming in to vertices in slice. Guarantees that all edges - // into every vertex in slice are represented in next_cut - std::shared_ptr classical_in_edges; - - // reference to circuit that it acts on - const Circuit &circ; - - explicit RoutingFrontier(const Circuit &_circ); - // initialise at front of circuit - void init(); - // move to next slice - void next_slicefrontier(); -}; - -// remove node from architecture as long as subgraph remains connected. Nodes -// not in map from architecture if possible -void remove_unmapped_nodes( - Architecture &arc, qubit_bimap_t &map, Circuit &circ); - -bool subgraph_remove_if_connected( - Architecture &arc, const Architecture &subarc, const Node &node); - -// remove nodes not in map from architecture if possible -void remove_unmapped_nodes( - Architecture &arc, qubit_bimap_t &map, Circuit &circ); - -Circuit autoroute(const Circuit &circ, const Architecture &arc); - -class RoutingTester; -/* Routing class, contains solve method for transforming a circuit such that -all its multi-qubit interactions are adjacent for some specified architecture. -*/ -class Routing { - public: - struct Stats { - unsigned n_try_all_swaps; - unsigned n_solve_furthest; - unsigned swap_count; - unsigned bridge_count; - Stats() - : n_try_all_swaps(0), - n_solve_furthest(0), - swap_count(0), - bridge_count(0) {} - }; - - /* Class Constructor */ - Routing(const Circuit &_circ, const Architecture &_arc); - /* Solve Method */ - // solve using default mapping (line_placement) and default config - // Default RoutingConfig provides a set of parameters that use all available - // features of Routing, but are not specialised for a certain architecture: - // depth_limit = 50 - // distrib_limit = 75 - // interactions_limit = 10 - // distrib_exponent = 0 - // This configuration is used for any solve method that does not have config - // specified.
- - // solve with default mapping and provided config - std::pair solve(const RoutingConfig &_config = {}); - qubit_bimap_t remap(const qubit_bimap_t &init); - void organise_registers_and_maps(); - - // TODO:: Make relevant and useful again - qubit_mapping_t return_final_map() const; - qubit_mapping_t return_initial_map() const; - /* Getters*/ - std::vector get_active_nodes() const; - - RoutingFrontier get_slicefrontier() const { return slice_frontier_; } - Stats get_stats() const { return route_stats; } - - private: - // Circuit being solved - Circuit circ_; - // RoutingFrontier tracking the position which has been solved up to - RoutingFrontier slice_frontier_; - // Configuration settings for routing - RoutingConfig config_; - - // Architecture being solved for and the original architecture given - Architecture current_arc_; - Architecture original_arc_; - - // Which qubits are interacting and total distance of a board state for - // interacting qubits - Interactions interaction; - // Total distance of a board state for interacting qubits - graphs::dist_vec dist_vector; - - Stats route_stats; - - boundary_t original_boundary; - - // Various qubit mappings. Qmap is used as the algorithm proceeds, the - // initial map is assigned from placement and the final map displays where - // qubits end up while routed. Relative mapping is what the final mapping - // would be if initial mapping was sequential. - qubit_bimap_t qmap, init_map, final_map; - - /* Swap_Analysis.cpp methods */ - // Methods used in determining the best Swap for a given board state and - // implementing it - void increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const; - graphs::dist_vec generate_distance_vector(const Interactions &inter) const; - graphs::dist_vec update_distance_vector( - const Swap &nodes, graphs::dist_vec new_dist_vector, - const Interactions &inte) const; - const std::pair pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const; - bool swap_decreases(const Swap &nodes, const Interactions &inte) const; - std::vector candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const; - std::vector cowtan_et_al_heuristic( - std::vector &candidate_swaps, const graphs::dist_vec &base_dists, - const Interactions &interac) const; - SwapResults try_all_swaps( - const std::vector &trial_edges); - - static void update_qmap(qubit_bimap_t &map, const Swap &swap); - void update_central_nodes( - const Swap &nodes, const Interactions &interac, - distributed_cx_info &candidate_distributed_cx); - - void compare_distributed_cx_distances( - distributed_cx_info &candidate_distributed_cx, - const std::pair, std::vector> &inter_node); - distributed_cx_info check_distributed_cx(const Swap &nodes); - void add_distributed_cx( - const Node &control_node, const Node &target_node, - const Node &central_node); - void add_swap(const Swap &nodes); - void perform_action(const Swap &nodes); - - // Dijkstra's algorithm methods - static std::vector path_to_swaps(const std::vector &path); - - bool solve_furthest(); - - /* Slice_Manipulation.cpp methods */ - // find nodes for qubits, activating if necessary - std::vector nodes_from_qubits(const qubit_vector_t &qubs); - // Advances slice frontier past any two_qubit operations on adjacent nodes - bool advance_frontier(); - - bool circuit_modified() const; - - friend class RoutingTester; - // generate interaction vectors from slice_frontiers, qubits=true means return - // qubit interactions rather than node - 
Interactions generate_interaction_frontier( - const RoutingFrontier &slice_front); - /* Qubit_Placement.cpp methods */ - // Methods for producing a good initial qubit mapping to an architecture from - // given circuit - - // void print_qubitlines(QubitLineList &in); - - /* Board_Analysis.cpp routing methods */ - Node find_best_inactive_node( - const Node &target_node, const Architecture &arc) const; - void activate_node(const Node &node); - void reactivate_qubit(const Qubit &qb, const Qubit &target); -}; - -class RoutingTester { - private: - Routing *router; - - public: - explicit RoutingTester(Routing *_router) : router(_router) {} - - Interactions get_interaction(const RoutingFrontier &sf); - void set_qmap(qubit_bimap_t _qmap); - void next_sf(RoutingFrontier &sf); - Circuit *get_circ(); - void set_config(const RoutingConfig &_config); - // Wrappers of private methods for testing? - void increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const; - graphs::dist_vec generate_distance_vector(const Interactions &inter) const; - graphs::dist_vec update_distance_vector( - const Swap &nodes, graphs::dist_vec new_dist_vector, - const Interactions &inte) const; - const std::pair pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const; - bool swap_decreases(const Swap &nodes, const Interactions &inte) const; - std::vector candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const; - std::vector cowtan_et_al_heuristic( - std::vector &candidate_swaps, const graphs::dist_vec &base_dists, - const Interactions &interac) const; - void update_qmap(qubit_bimap_t &map, const Swap &swap); - std::vector path_to_swaps(const std::vector &path) const; - qubit_bimap_t set_default_initial_map( - std::optional canonical_node_order = std::nullopt); - void initialise_slicefrontier(); - void add_distributed_cx( - const Node &control_node, const Node &target_node, - const Node &central_node); - distributed_cx_info check_distributed_cx(const Swap &nodes); - void advance_frontier(); - void set_interaction(); -}; - -} // namespace tket diff --git a/tket/src/Routing/Slice_Manipulation.cpp b/tket/src/Routing/Slice_Manipulation.cpp deleted file mode 100644 index 065fe11b9a..0000000000 --- a/tket/src/Routing/Slice_Manipulation.cpp +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include -#include - -#include "Routing.hpp" - -namespace tket { - -RoutingFrontier::RoutingFrontier(const Circuit& _circ) : circ(_circ) { init(); } -void RoutingFrontier::init() { - VertexVec input_slice; - quantum_in_edges = std::make_shared(); - classical_in_edges = std::make_shared(); - - for (const Qubit& qb : circ.all_qubits()) { - Vertex input = circ.get_in(qb); - input_slice.push_back(input); - Edge candidate = circ.get_nth_out_edge(input, 0); - quantum_in_edges->insert({qb, circ.skip_irrelevant_edges(candidate)}); - } - for (const Bit& bit : circ.all_bits()) { - Vertex input = circ.get_in(bit); - EdgeVec candidates = circ.get_nth_b_out_bundle(input, 0); - classical_in_edges->insert({bit, candidates}); - } - - CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); - slice = next_cut.slice; - quantum_out_edges = next_cut.u_frontier; -} - -void RoutingFrontier::next_slicefrontier() { - quantum_in_edges = std::make_shared(); - classical_in_edges = std::make_shared(); - for (const std::pair& pair : quantum_out_edges->get()) { - Edge new_e = circ.skip_irrelevant_edges(pair.second); - quantum_in_edges->insert({pair.first, new_e}); - Vertex targ = circ.target(new_e); - EdgeVec targ_classical_ins = - circ.get_in_edges_of_type(targ, EdgeType::Boolean); - classical_in_edges->insert( - {Bit("frontier_bit", pair.first.index()), targ_classical_ins}); - } - - CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); - slice = next_cut.slice; - quantum_out_edges = next_cut.u_frontier; -} - -std::vector Routing::nodes_from_qubits(const qubit_vector_t& qubs) { - std::vector nodes; - unsigned start = 0; - if (qmap.empty()) { - Node node0 = *(original_arc_.max_degree_nodes().begin()); - activate_node(node0); - qmap.left.insert({qubs[0], node0}); - init_map.left.insert({qubs[0], node0}); - nodes.push_back(node0); - start++; - } - - for (unsigned i = start; i < qubs.size(); i++) { - l_const_iterator_t node_find = qmap.left.find(qubs[i]); - if (node_find == qmap.left.end()) { - if (i < qubs.size() - 1 && - qmap.left.find(qubs[i + 1]) != - qmap.left.end()) { // TODO: Could this if condition cause some - // nasty non determinism? - reactivate_qubit(qubs[i], qubs[i + 1]); - nodes.push_back(qmap.left.at(qubs[i])); - } else { - if (i != 0) { - reactivate_qubit(qubs[i], qubs[0]); - nodes.push_back(qmap.left.at(qubs[i])); - } else { - reactivate_qubit(qubs[i], qmap.begin()->left); - nodes.push_back(qmap.left.at(qubs[i])); - } - } - } else { - nodes.push_back(node_find->second); - } - } - return nodes; -} - -/* -Advances slice frontier past any two_qubit operations on adjacent nodes -*/ -bool Routing::advance_frontier() { - bool found_adjacent_op = true; - while (found_adjacent_op && !slice_frontier_.slice->empty()) { - found_adjacent_op = false; - for (const Vertex& vert : *slice_frontier_.slice) { - qubit_vector_t qubs; - for (const Edge& q_out : - circ_.get_out_edges_of_type(vert, EdgeType::Quantum)) { - for (const std::pair& pair : - slice_frontier_.quantum_out_edges->get()) { - if (pair.second == q_out) { - qubs.push_back(Qubit(pair.first)); - break; - } - } - } - // Find OpType. If OpType is a Conditional, unpack to find vertex inside. - // If it's nested, this will fail. 
- OpType vert_type = circ_.get_OpType_from_Vertex(vert); - if (vert_type == OpType::Conditional) { - const Conditional& b = static_cast( - *circ_.get_Op_ptr_from_Vertex(vert)); - vert_type = b.get_op()->get_type(); - } - - // the vertex must be two qubits or a bridge, which we can skip past - - if (qubs.size() != 2 && vert_type != OpType::BRIDGE && - vert_type != OpType::Barrier) { - throw(CircuitInvalidity( - "Vertex has " + std::to_string(qubs.size()) + - " qubits, expected 2.")); - } - // BRIDGE gates are guaranteed to be across 3 adjacent nodes, - // already mapped so can just be read directly from the qmap - // otherwise, qubits may need to be activated first - std::vector nods = nodes_from_qubits(qubs); - - bool all_qbs_adjacent = true; - for (unsigned i = 0; i < nods.size() - 1; i++) { - all_qbs_adjacent &= - (current_arc_.get_distance(nods[i], nods[i + 1]) == 1); - } - if (all_qbs_adjacent || - vert_type == OpType::Barrier) { // if by eachother - found_adjacent_op = true; // i.e. at least one 2qb gate has - // been able to run - // for all qubits skip subsequent single qubit vertices to move - // in edges to be prior to next multiqubit vertex - for (const Qubit& qub : qubs) { - Edge new_e = circ_.skip_irrelevant_edges( - slice_frontier_.quantum_out_edges->find(qub)->second); - slice_frontier_.quantum_in_edges->replace( - slice_frontier_.quantum_in_edges->find(qub), {qub, new_e}); - Vertex targ = circ_.target(new_e); - EdgeVec targ_classical_ins = - circ_.get_in_edges_of_type(targ, EdgeType::Boolean); - Bit b("frontier_bit", qub.index()); - if (slice_frontier_.classical_in_edges->find(b) == - slice_frontier_.classical_in_edges->end()) { - slice_frontier_.classical_in_edges->insert({b, targ_classical_ins}); - } else { - slice_frontier_.classical_in_edges->replace( - slice_frontier_.classical_in_edges->find(b), - {b, targ_classical_ins}); - } - } - } - } - if (found_adjacent_op) { - CutFrontier next_cut = circ_.next_cut( - slice_frontier_.quantum_in_edges, slice_frontier_.classical_in_edges); - slice_frontier_.slice = next_cut.slice; - slice_frontier_.quantum_out_edges = next_cut.u_frontier; - slice_frontier_.classical_in_edges = std::make_shared(); - for (const std::pair& pair : - slice_frontier_.quantum_in_edges->get()) { - Vertex targ = circ_.target(pair.second); - EdgeVec targ_classical_ins = - circ_.get_in_edges_of_type(targ, EdgeType::Boolean); - Bit b("frontier_bit", pair.first.index()); - slice_frontier_.classical_in_edges->insert({b, targ_classical_ins}); - } - } - } - - interaction = generate_interaction_frontier(slice_frontier_); // reset - dist_vector = generate_distance_vector(interaction); - return found_adjacent_op; -} - -Interactions Routing::generate_interaction_frontier( - const RoutingFrontier& slice_front) { - Interactions inter; - for (const UnitID& uid : current_arc_.nodes()) { - Node n(uid); - inter.insert({n, n}); - } - for (const Vertex& vert : *slice_front.slice) { - qubit_vector_t qubs; - for (const Edge& q_out : - circ_.get_out_edges_of_type(vert, EdgeType::Quantum)) { - for (const std::pair& pair : - slice_front.quantum_out_edges->get()) { - if (pair.second == q_out) { - qubs.push_back(Qubit(pair.first)); - break; - } - } - } - // if generate_interaction_frontier called with slice_frontier_ no ops with - // more than two qubits will be present if generate_interaction_frontier - // called with frontier made in try_all_swaps or check_distributed_cx, - // Barrier Op possible. 
If barrier op in slice, don't add qubits in barrier - // interaction to Interactions - if (qubs.size() != 2) { - if (circ_.get_OpType_from_Vertex(vert) == OpType::Barrier) continue; - throw CircuitInvalidity( - "Vertex has " + std::to_string(qubs.size()) + " qubits, expected 2."); - } - - l_const_iterator_t node0_find = qmap.left.find(qubs[0]); - l_const_iterator_t node1_find = qmap.left.find(qubs[1]); - if (node0_find != qmap.left.end() && node1_find != qmap.left.end()) { - Node one = node0_find->second; - Node two = node1_find->second; - inter[one] = two; - inter[two] = one; - } - } - return inter; -} - -} // namespace tket diff --git a/tket/src/Routing/Swap_Analysis.cpp b/tket/src/Routing/Swap_Analysis.cpp deleted file mode 100644 index 28728d317a..0000000000 --- a/tket/src/Routing/Swap_Analysis.cpp +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Architecture/Architecture.hpp" -#include "Circuit/CircPool.hpp" -#include "Routing/Routing.hpp" - -namespace tket { - -/* Routing Class Methods for picking optimal swaps */ - -/* Overloaded methods for generating distance vectors */ -// Distance vectors comprise of information pertaining to the architectural -// distance between qubits immediately interacting - -// Generates distance vector from input interaction vector -std::vector Routing::generate_distance_vector( - const Interactions &inter) const { - const unsigned n = current_arc_.get_diameter(); - // const unsigned n = active_distance_matrix.maxCoeff(); - if (n < 1) { - throw ArchitectureInvalidity("Architecture has diameter 0."); - } - std::vector dv(n - 1); - for (auto [n1, n2] : inter) { - unsigned dist = current_arc_.get_distance(n1, n2); - if (dist > 1) { - ++dv[n - dist]; - } - } - return dv; -} - -// Returns the distance between n1 and p1 and the distance between n2 and p2, -// distance ordered (greatest first) -const std::pair Routing::pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const { - unsigned curr_dist1 = current_arc_.get_distance(n1, p1); - unsigned curr_dist2 = current_arc_.get_distance(n2, p2); - return (curr_dist1 > curr_dist2) ? std::make_pair(curr_dist1, curr_dist2) - : std::make_pair(curr_dist2, curr_dist1); -} - -// Determines if a proposed swap brings interacting qubits closer, improving -// board state. 
-bool Routing::swap_decreases( - const Swap &nodes, const Interactions &inte) const { - Node node1 = nodes.first; - Node pair1 = inte.at(node1); - Node node2 = nodes.second; - Node pair2 = inte.at(node2); - - if (pair1 == node2 || (node1 == pair1 && node2 == pair2)) { - return false; - } - const std::pair &curr_dists = - pair_dists(node1, pair1, node2, pair2); - const std::pair &news_dists = - pair_dists(node2, pair1, node1, pair2); - - return news_dists < curr_dists; -} - -// Given swap and distance vector, updates distance vector to reflect increment -// change due to swaps nodes -void Routing::increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const { - const unsigned n = current_arc_.get_diameter(); - const unsigned dis_index = - n - current_arc_.get_distance(pair.first, pair.second); - if (dis_index < new_dist_vector.size()) { - new_dist_vector[dis_index] += increment; - } -} - -/* Overloaded method for updating temporary distance vectors due to proposed - * swaps */ -// Updates distance vector from proposed swap using global first slice -// interaction vector solve furthest only at this point ... - -// Updates distance vector from presented interaction vector -graphs::dist_vec Routing::update_distance_vector( - const Swap &nodes, std::vector new_dist_vector, - const Interactions &inte) const { - increment_distance(new_dist_vector, {nodes.first, inte.at(nodes.first)}, -2); - increment_distance( - new_dist_vector, {nodes.second, inte.at(nodes.second)}, -2); - increment_distance(new_dist_vector, {nodes.second, inte.at(nodes.first)}, 2); - increment_distance(new_dist_vector, {nodes.first, inte.at(nodes.second)}, 2); - return new_dist_vector; -} - -// Updates qmap to reflect performed swap -void Routing::update_qmap(qubit_bimap_t &map, const Swap &swap) { - const Qubit qb1 = map.right.at(swap.first); - const Qubit qb2 = map.right.at(swap.second); - map.right.erase(swap.first); - map.right.erase(swap.second); - map.left.insert({qb1, swap.second}); - map.left.insert({qb2, swap.first}); -} - -std::vector Routing::candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const { - std::vector potential_swaps; - for (auto [node, adjacent_node] : trial_edges) { - if (inte.at(node) != node || inte.at(adjacent_node) != adjacent_node) { - Swap proposed = {node, adjacent_node}; - if (swap_decreases(proposed, inte)) { - potential_swaps.push_back(proposed); - } - } - } - return potential_swaps; -} - -// Move heuristic in try_all_swaps loop outside, for testing help and easy -// changing? 
-std::vector Routing::cowtan_et_al_heuristic( - std::vector &candidate_swaps, - const std::vector &base_dists, - const Interactions &interac) const { - const Swap winner = candidate_swaps.back(); - candidate_swaps.pop_back(); - std::vector winner_distances = - update_distance_vector(winner, base_dists, interac); - std::vector smaller_set; - smaller_set.push_back(winner); - for (const Swap &proposed_swap : candidate_swaps) { - const std::vector proposed_distances = - update_distance_vector(proposed_swap, base_dists, interac); - const int comp = - tri_lexicographical_comparison(proposed_distances, winner_distances); - if (comp == -1) { - smaller_set.push_back(proposed_swap); - } else if (comp == 1) { - smaller_set = {proposed_swap}; - winner_distances = proposed_distances; - } - } - return smaller_set; -} - -SwapResults Routing::try_all_swaps(const std::vector - &trial_edges) { // don't need to change - std::vector potential_swaps = candidate_swaps(trial_edges, interaction); - - if (potential_swaps.empty()) return {false, {Node(0), Node(0)}}; - - RoutingFrontier high_sf = slice_frontier_; - - for (unsigned i = 0; i < config_.depth_limit && !high_sf.slice->empty() && - potential_swaps.size() > 1; - i++) { - Interactions interac = - (i == 0) ? interaction : generate_interaction_frontier(high_sf); - std::vector base_dists = - (i == 0) ? dist_vector : generate_distance_vector(interac); - - potential_swaps = - cowtan_et_al_heuristic(potential_swaps, base_dists, interac); - - high_sf.next_slicefrontier(); - } - - return {1, potential_swaps.back()}; -} - -std::vector Routing::path_to_swaps(const std::vector &path) { - const unsigned len = path.size(); - std::vector output_swaps; - if (len > 2) { - unsigned halfway = len / 2; - for (unsigned i = 0; (i < halfway) || ((halfway + 2 + i) < len); i++) { - if (i < halfway) { - Swap sw1 = {path[i], path[i + 1]}; - output_swaps.push_back(sw1); - } - if ((halfway + 2 + i) < len) { - Swap sw2 = {path[len - i - 2], path[len - i - 1]}; - output_swaps.push_back(sw2); - } - } - } - return output_swaps; -} - -// If heuristic can't settle on a suitable single swap or pair of swaps, find a -// path between the two interacting qubits at greatest distance and swap along -// it. 
-bool Routing::solve_furthest() { - bool success = false; - std::optional max_node; - unsigned max_dist = 0; - for (auto [q1, q2] : interaction) { - unsigned dist = current_arc_.get_distance(q1, q2); - if (dist > max_dist) { - max_dist = dist; - max_node = q1; - } - } - if (!max_node.has_value()) { - throw ArchitectureInvalidity("Architecture is disconnected"); - } - Node root = *max_node; - if (max_dist > 1) { - Node target = interaction.at(root); - const std::vector path = current_arc_.get_path(root, target); - const std::vector swaps_to_perform = path_to_swaps(path); - for (const Swap &swap : swaps_to_perform) { - success = true; - add_swap(swap); - } - } - return success; -} - -void Routing::update_central_nodes( - const Swap &nodes, const Interactions &interac, - distributed_cx_info &candidate_distributed_cx) { - if (candidate_distributed_cx.first.first) { - // TODO: check that there isnt a better way than get_path to do this - std::vector path = - current_arc_.get_path(nodes.first, interac.at(nodes.first)); - candidate_distributed_cx.first.second = path[1]; - if (interac.at(path[1]) != path[1]) { - candidate_distributed_cx.first.first = false; - } - } - if (candidate_distributed_cx.second.first) { - // TODO: this uses solve furthest -> maybe a better way just - // using the distance matrix alone for a speed up? - std::vector path = - current_arc_.get_path(nodes.second, interac.at(nodes.second)); - candidate_distributed_cx.second.second = path[1]; - if (interac.at(path[1]) != path[1]) { - candidate_distributed_cx.second.first = false; - } - } -} - -// Difference in distance between interacting qubits between nodes in SWAP can -// only differ by 1 Compares the difference in distance between all given -// interactions, scales them dependent on how timesteps to interaction, and -// returns whether a distributed cx is desired. -void Routing::compare_distributed_cx_distances( - distributed_cx_info &candidate_distributed_cx, - const std::pair, std::vector> &inter_node) { - std::pair distance_check = {0, 0}; - for (unsigned i = 1; i < inter_node.first.size(); i++) { - distance_check.first += pow(i, config_.distrib_exponent) * - (int(current_arc_.get_distance( - inter_node.second[0], inter_node.first[i])) - - int(current_arc_.get_distance( - inter_node.first[0], inter_node.first[i]))); - } - for (unsigned i = 1; i < inter_node.second.size(); i++) { - distance_check.second += pow(i, config_.distrib_exponent) * - (int(current_arc_.get_distance( - inter_node.first[0], inter_node.second[i])) - - int(current_arc_.get_distance( - inter_node.second[0], inter_node.second[i]))); - } - if (distance_check.first < 0) { - candidate_distributed_cx.first.first = false; - } - if (distance_check.second < 0) { - candidate_distributed_cx.second.first = false; - } -} - -bool check_vertex_is_CX(const Circuit &circ_, const Vertex &v) { - OpType ot = circ_.get_OpType_from_Vertex(v); - if (ot != OpType::CX) { - if (ot == OpType::Conditional) { - const Conditional &b = - static_cast(*circ_.get_Op_ptr_from_Vertex(v)); - if (b.get_op()->get_type() != OpType::CX) { - return false; - } - } else { - return false; - } - } - return true; -} -// Method is supplied with a pair of nods with the intention of being swapped. 
-// Before this SWAP gate is added, this method considers whether a distributed -// CX gate between interacting qubits distance 2 away from eachother is a better -// option The returned bool pair instructs perform_action whether to add a -// distributed CX gate between nodes.first and its partner node and nodes.second -// and its partner node respectively -distributed_cx_info Routing::check_distributed_cx(const Swap &nodes) { - // 1) Determine which nodes in SWAP gate could complete their CX with a - // distributed CX gate instead - distributed_cx_info candidate_distributed_cx = { - {current_arc_.get_distance(nodes.first, interaction[nodes.first]) == 2, - Node(0)}, - {current_arc_.get_distance(nodes.second, interaction[nodes.second]) == 2, - Node(0)}}; - // 1 pt2) Is the vertex a CX gate or Conditioned CX gate? - auto cx_check = [&](bool candidate, const Qubit &qb) { - if (candidate) - return check_vertex_is_CX( - circ_, - circ_.target(slice_frontier_.quantum_in_edges->find(qb)->second)); - return true; - }; - if (!cx_check( - candidate_distributed_cx.first.first, qmap.right.at(nodes.first))) - return {{false, Node(0)}, {false, Node(0)}}; - if (!cx_check( - candidate_distributed_cx.second.first, qmap.right.at(nodes.second))) - return {{false, Node(0)}, {false, Node(0)}}; - - if (candidate_distributed_cx.first.first || - candidate_distributed_cx.second.first) { - // 2) Find number of next interactions for node in SWAP equivalent to - // config, or reached within depth limit - std::pair, std::vector> inter_node = { - {nodes.first}, {nodes.second}}; - std::pair ni_limit = {0, 0}; - - RoutingFrontier high_sf = slice_frontier_; - - for (unsigned i = 0; i < config_.distrib_limit && !high_sf.slice->empty() && - (ni_limit.first < config_.interactions_limit || - ni_limit.second < config_.interactions_limit); - i++) { - // Find interaction frontier for current slice and find the interacting - // pairs of nodes for incident SWAP gate. - Interactions interac = - (i == 0) ? interaction : generate_interaction_frontier(high_sf); - - if (nodes.first != interac[nodes.first] && - ni_limit.first < config_.interactions_limit) { - inter_node.first.push_back(interac[nodes.first]); - ni_limit.first++; - } - if (nodes.second != interac[nodes.second] && - ni_limit.second < config_.interactions_limit) { - inter_node.second.push_back(interac[nodes.second]); - ni_limit.second++; - } - high_sf.next_slicefrontier(); - } - if (ni_limit.first > 0 && ni_limit.second > 0) { - // 3) Compare difference in distances between interacting qubits given the - // permutation of qubits from added SWAP gate, or not. - compare_distributed_cx_distances(candidate_distributed_cx, inter_node); - if (candidate_distributed_cx.first.first || - candidate_distributed_cx.second.first) { - // 4) If desirable, find the central node of the bridge. - update_central_nodes(nodes, interaction, candidate_distributed_cx); - return candidate_distributed_cx; - } - } - } - return {{false, Node(0)}, {false, Node(0)}}; -} - -// Give a node with a control qubit on it, finds its respective target node and -// node between them, and replaces the CX gate between the control and target -// with a distributed CX -void Routing::add_distributed_cx( - const Node &cx_node_0, const Node &cx_node_1, const Node ¢ral_node) { - // Find interacting node for starting_node, find node between them. Also swap - // control and target node if necessary. 
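  // --- Editor's note (illustrative, not part of the original patch) ----------
  // A "distributed CX" (BRIDGE) implements a CX between a control c and a
  // target t that are two edges apart, using the middle node m as a mediator.
  // One standard decomposition into nearest-neighbour CXs, written with the
  // same Circuit API used elsewhere in this file, is
  //
  //   circ.add_op(OpType::CX, {c, m});
  //   circ.add_op(OpType::CX, {m, t});
  //   circ.add_op(OpType::CX, {c, m});
  //   circ.add_op(OpType::CX, {m, t});
  //
  // which applies CX(c, t) and returns the mediator m to its original state.
  // -----------------------------------------------------------------------------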
- - if (current_arc_.get_distance(cx_node_0, cx_node_1) != 2) { - throw BridgeInvalid("Bridge Nodes are not distance 2 apart."); - } - if (current_arc_.get_distance(cx_node_0, central_node) != 1 || - current_arc_.get_distance(cx_node_1, central_node) != 1) { - throw BridgeInvalid( - "Central BRIDGE node not adjacent to Control and Target " - "nodes."); - } - - route_stats.bridge_count++; - Edge edge_0 = - slice_frontier_.quantum_in_edges->find(qmap.right.at(cx_node_0))->second; - Edge edge_1 = - slice_frontier_.quantum_in_edges->find(qmap.right.at(cx_node_1))->second; - - // Assign control and target nodes from cx_node_0 and cx_node_1 - // Depends on the port ordering of the cx_node_0 and cx_node_1 corresponding - // edges attached to the CX vertex - Node control_node, target_node; - if (circ_.get_ports(edge_1).second < circ_.get_ports(edge_0).second) { - control_node = cx_node_1; - target_node = cx_node_0; - } else { - control_node = cx_node_0; - target_node = cx_node_1; - } - - // Find qubits associated to each node - const Qubit control_qb = qmap.right.at(control_node); - const Qubit central_qb = qmap.right.at(central_node); - const Qubit target_qb = qmap.right.at(target_node); - - // Initialize variables appropriate for substituting Conditionals with CX - // gates to Conditionals with BRIDGE gates. - Op_ptr new_bridge_ptr; - EdgeVec b_in_edges = {}; - OpType gate_op; - std::vector> classical_edge_info = {}; - - Vertex to_be_replaced = slice_frontier_.circ.target( - slice_frontier_.quantum_in_edges->find(control_qb)->second); - // If OpType is a Conditional{CX}, replace with Conditional{BRIDGE} instead - if (circ_.get_OpType_from_Vertex(to_be_replaced) == OpType::Conditional) { - Op_ptr pt = circ_.get_Op_ptr_from_Vertex(to_be_replaced); - const Conditional &b = static_cast( - *circ_.get_Op_ptr_from_Vertex(to_be_replaced)); - gate_op = b.get_op()->get_type(); - new_bridge_ptr = std::make_shared( - get_op_ptr(OpType::BRIDGE, std::vector(), 3), b.get_width(), - b.get_value()); - // Also collect any classical in edges - b_in_edges = circ_.get_in_edges_of_type(to_be_replaced, EdgeType::Boolean); - for (Edge e : b_in_edges) { - classical_edge_info.push_back( - {circ_.source(e), circ_.get_source_port(e), - circ_.get_target_port(e)}); - } - } else { // else make normal bridge - new_bridge_ptr = get_op_ptr(OpType::BRIDGE); - gate_op = circ_.get_OpType_from_Vertex(to_be_replaced); - } - - if (gate_op != OpType::CX) { - throw BridgeInvalid( - "OpType::BRIDGE being substituted for a vertex that isn't " - "OpType::CX. 
Please rebase two-qubit primitive to CX gate."); - } - // Collect all required Quantum edge information - - Edge control_in_edge = - slice_frontier_.quantum_in_edges->find(control_qb)->second; - Edge control_out_edge = - slice_frontier_.quantum_out_edges->find(control_qb)->second; - Edge central_edge = - slice_frontier_.quantum_in_edges->find(central_qb)->second; - Edge target_in_edge = - slice_frontier_.quantum_in_edges->find(target_qb)->second; - Edge target_out_edge = - slice_frontier_.quantum_out_edges->find(target_qb)->second; - - VertPort control_pred = { - circ_.source(control_in_edge), circ_.get_source_port(control_in_edge)}; - VertPort central_pred = { - circ_.source(central_edge), circ_.get_source_port(central_edge)}; - VertPort target_pred = { - circ_.source(target_in_edge), circ_.get_source_port(target_in_edge)}; - - VertPort control_succ = { - circ_.target(control_out_edge), circ_.get_target_port(control_out_edge)}; - VertPort central_succ = { - circ_.target(central_edge), circ_.get_target_port(central_edge)}; - VertPort target_succ = { - circ_.target(target_out_edge), circ_.get_target_port(target_out_edge)}; - - // remove old vertex, add new vertex - circ_.remove_vertex( - to_be_replaced, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); - Vertex bridge_vert = circ_.add_vertex(new_bridge_ptr); - // add Boolean edges - for (std::tuple vpp : classical_edge_info) { - circ_.add_edge( - {std::get<0>(vpp), std::get<1>(vpp)}, {bridge_vert, std::get<2>(vpp)}, - EdgeType::Boolean); - } - - unsigned num_classicals = classical_edge_info.size(); - // add control qubit in edge - Edge control_in = circ_.add_edge( - control_pred, {bridge_vert, num_classicals}, EdgeType::Quantum); - // add control qubit out edge - Edge control_out = circ_.add_edge( - {bridge_vert, num_classicals}, control_succ, EdgeType::Quantum); - // add central qubit in edge - Edge central_in = circ_.add_edge( - central_pred, {bridge_vert, num_classicals + 1}, EdgeType::Quantum); - // add central qubit out edge - Edge central_out = circ_.add_edge( - {bridge_vert, num_classicals + 1}, central_succ, EdgeType::Quantum); - // add target qubit in edge - Edge target_in = circ_.add_edge( - target_pred, {bridge_vert, num_classicals + 2}, EdgeType::Quantum); - // add target qubit out edge - Edge target_out = circ_.add_edge( - {bridge_vert, num_classicals + 2}, target_succ, EdgeType::Quantum); - - // Remove central_edge which is now going through BRIDGE vertex - circ_.remove_edge(central_edge); - - unit_frontier_t::iterator control_qb_in_it = - slice_frontier_.quantum_in_edges->find(control_qb); - unit_frontier_t::iterator central_qb_in_it = - slice_frontier_.quantum_in_edges->find(central_qb); - unit_frontier_t::iterator target_qb_in_it = - slice_frontier_.quantum_in_edges->find(target_qb); - - slice_frontier_.quantum_in_edges->replace( - control_qb_in_it, {control_qb, control_in}); - slice_frontier_.quantum_in_edges->replace( - central_qb_in_it, {central_qb, central_in}); - slice_frontier_.quantum_in_edges->replace( - target_qb_in_it, {target_qb, target_in}); - - // Update slice frontier out edges - unit_frontier_t::iterator control_qb_out_it = - slice_frontier_.quantum_out_edges->find(control_qb); - unit_frontier_t::iterator central_qb_out_it = - slice_frontier_.quantum_out_edges->find(central_qb); - unit_frontier_t::iterator target_qb_out_it = - slice_frontier_.quantum_out_edges->find(target_qb); - - slice_frontier_.quantum_out_edges->replace( - control_qb_out_it, {control_qb, control_out}); - 
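  // --- Editor's note (illustrative, not part of the original patch) ----------
  // The replace() calls above and below keep the RoutingFrontier consistent:
  // each frontier in/out edge that previously pointed at the removed CX vertex
  // is substituted with the corresponding edge of the new BRIDGE vertex, so
  // later slicing and routing continue from the rewired control, central and
  // target wires.
  // -----------------------------------------------------------------------------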
slice_frontier_.quantum_out_edges->replace( - central_qb_out_it, {central_qb, central_out}); - slice_frontier_.quantum_out_edges->replace( - target_qb_out_it, {target_qb, target_out}); - - // Remove CX vertex from Slice (i.e. VertexVec) in slice_frontier- - slice_frontier_.slice->erase( - std::remove( - slice_frontier_.slice->begin(), slice_frontier_.slice->end(), - to_be_replaced), - slice_frontier_.slice->end()); - slice_frontier_.slice->push_back(bridge_vert); -} - -// Suitable swap found, amend all global constructs -void Routing::add_swap(const Swap &nodes) { - route_stats.swap_count++; - const Qubit qb1 = qmap.right.at(nodes.first); - const Qubit qb2 = qmap.right.at(nodes.second); - - update_qmap(qmap, nodes); - - // --- --X--\ /-- - // = | X - // --- --X--/ \-- - // So we insert a SWAP gate and perform the wire swap by changing the output - // ports - - // find edges using qubits - EdgeVec preds = { - slice_frontier_.quantum_in_edges->find(qb1)->second, - slice_frontier_.quantum_in_edges->find(qb2)->second}; - - Vertex swap_v = circ_.add_vertex(OpType::SWAP); - circ_.rewire(swap_v, preds, {EdgeType::Quantum, EdgeType::Quantum}); - EdgeVec swap_outs = circ_.get_all_out_edges(swap_v); - - circ_.dag[swap_outs[0]].ports.first = 1; - circ_.dag[swap_outs[1]].ports.first = 0; - unit_frontier_t::iterator qb1_in_it = - slice_frontier_.quantum_in_edges->find(qb1); - slice_frontier_.quantum_in_edges->replace(qb1_in_it, {qb1, swap_outs[0]}); - unit_frontier_t::iterator qb2_in_it = - slice_frontier_.quantum_in_edges->find(qb2); - slice_frontier_.quantum_in_edges->replace(qb2_in_it, {qb2, swap_outs[1]}); - unit_frontier_t::iterator qb1_out_it = - slice_frontier_.quantum_out_edges->find(qb1); - unit_frontier_t::iterator qb2_out_it = - slice_frontier_.quantum_out_edges->find(qb2); - if (preds[0] == qb1_out_it->second) { - slice_frontier_.quantum_out_edges->replace(qb1_out_it, {qb1, swap_outs[0]}); - } else if (preds[1] == qb2_out_it->second) { - slice_frontier_.quantum_out_edges->replace(qb2_out_it, {qb2, swap_outs[1]}); - } -} - -void Routing::perform_action(const Swap &nodes) { - distributed_cx_info cdcx = check_distributed_cx(nodes); - if (cdcx.first - .first) { // in current heuristic, both nodes in SWAP being distance - // two from target, and closer to next interaction if not - // permuted is exceptionally rare (never so far...) 
- Node temp = nodes.first; - add_distributed_cx(temp, interaction[nodes.first], cdcx.first.second); - } else if (cdcx.second.first) { - Node temp = nodes.second; - add_distributed_cx(temp, interaction[nodes.second], cdcx.second.second); - } else { - add_swap(nodes); - } -} -} // namespace tket diff --git a/tket/tests/test_CompilerPass.cpp b/tket/tests/test_CompilerPass.cpp index 812aa3adc3..f6ce8858ba 100644 --- a/tket/tests/test_CompilerPass.cpp +++ b/tket/tests/test_CompilerPass.cpp @@ -18,11 +18,11 @@ #include "Circuit/Circuit.hpp" #include "OpType/OpType.hpp" #include "OpType/OpTypeFunctions.hpp" +#include "Placement/Placement.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/PassLibrary.hpp" -#include "Routing/Placement.hpp" #include "Simulation/CircuitSimulator.hpp" #include "Simulation/ComparisonFunctions.hpp" #include "Transformations/ContextualReduction.hpp" diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index 15288938b6..1fc08d98c0 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -6,7 +6,6 @@ #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/PassLibrary.hpp" -#include "Routing/Routing.hpp" namespace tket { SCENARIO("Test LexiRoute::solve") { diff --git a/tket/tests/test_Placement.cpp b/tket/tests/test_Placement.cpp index bba8bee301..7dddf37605 100644 --- a/tket/tests/test_Placement.cpp +++ b/tket/tests/test_Placement.cpp @@ -15,7 +15,7 @@ #include #include -#include "Routing/Placement.hpp" +#include "Placement/Placement.hpp" #include "testutil.hpp" namespace tket { diff --git a/tket/tests/test_Predicates.cpp b/tket/tests/test_Predicates.cpp index 7c02b7cc9b..172d684c2d 100644 --- a/tket/tests/test_Predicates.cpp +++ b/tket/tests/test_Predicates.cpp @@ -14,6 +14,7 @@ #include +#include "Placement/Placement.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/Predicates.hpp" #include "testutil.hpp" diff --git a/tket/tests/test_Routing.cpp b/tket/tests/test_Routing.cpp deleted file mode 100644 index e8f57c4a07..0000000000 --- a/tket/tests/test_Routing.cpp +++ /dev/null @@ -1,2707 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
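// --- Editor's sketch (illustrative addition, not part of the original patch) -
// The RoutingTester defined below is a thin wrapper holding a Routing* so that
// the router's private heuristics can be exercised directly in tests
// (presumably via a friend declaration in the Routing class). The same pattern
// in miniature, with hypothetical names:
class Widget {
  int secret() const { return 42; }  // private detail under test
  friend class WidgetTester;
};

class WidgetTester {
 public:
  explicit WidgetTester(Widget *w) : w_(w) {}
  int secret() const { return w_->secret(); }  // forwards to the private method

 private:
  Widget *w_;
};
// ------------------------------------------------------------------------------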
- -#include -#include -#include - -#include "Characterisation/DeviceCharacterisation.hpp" -#include "Circuit/Circuit.hpp" -#include "OpType/OpType.hpp" -#include "Predicates/CompilerPass.hpp" -#include "Predicates/PassGenerators.hpp" -#include "Predicates/Predicates.hpp" -#include "Routing/Routing.hpp" -#include "Routing/Verification.hpp" -#include "Simulation/CircuitSimulator.hpp" -#include "Simulation/ComparisonFunctions.hpp" -#include "Transformations/Transform.hpp" -#include "Utils/HelperFunctions.hpp" -#include "testutil.hpp" - -namespace tket { - -using Connection = Architecture::Connection; - -Interactions RoutingTester::get_interaction(const RoutingFrontier &sf) { - return router->generate_interaction_frontier(sf); -} - -// Wrappers of private methods for testing? -void RoutingTester::increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const { - router->increment_distance(new_dist_vector, pair, increment); -} - -graphs::dist_vec RoutingTester::generate_distance_vector( - const Interactions &inter) const { - return router->generate_distance_vector(inter); -} - -graphs::dist_vec RoutingTester::update_distance_vector( - const Swap &nodes, graphs::dist_vec new_dist_vector, - const Interactions &inte) const { - return router->update_distance_vector(nodes, new_dist_vector, inte); -} - -const std::pair RoutingTester::pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const { - return router->pair_dists(n1, p1, n2, p2); -} - -bool RoutingTester::swap_decreases( - const Swap &nodes, const Interactions &inte) const { - return router->swap_decreases(nodes, inte); -} - -std::vector RoutingTester::candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const { - return router->candidate_swaps(trial_edges, inte); -} - -std::vector RoutingTester::cowtan_et_al_heuristic( - std::vector &candidate_swaps, const graphs::dist_vec &base_dists, - const Interactions &interac) const { - return router->cowtan_et_al_heuristic(candidate_swaps, base_dists, interac); -} - -void RoutingTester::update_qmap(qubit_bimap_t &map, const Swap &swap) { - router->update_qmap(map, swap); -} - -std::vector RoutingTester::path_to_swaps( - const std::vector &path) const { - return router->path_to_swaps(path); -} - -qubit_bimap_t default_qubit_map(const Circuit &circ) { - qubit_bimap_t qmap; - unsigned node = 0; - for (const Qubit &qb : circ.all_qubits()) { - qmap.insert({qb, Node(node)}); - node++; - } - return qmap; -} -qubit_bimap_t RoutingTester::set_default_initial_map( - std::optional canonical_node_order) { - qubit_bimap_t qmap; - unsigned node = 0; - for (const Qubit &qb : router->circ_.all_qubits()) { - if (canonical_node_order.has_value()) { - qmap.insert({qb, canonical_node_order->at(node)}); - } else { - qmap.insert({qb, Node(node)}); - } - node++; - } - router->init_map = qmap; - router->qmap = qmap; - return qmap; -} - -void RoutingTester::initialise_slicefrontier() { - router->slice_frontier_.init(); -} - -void RoutingTester::add_distributed_cx( - const Node &control_node, const Node &target_node, - const Node ¢ral_node) { - router->add_distributed_cx(control_node, target_node, central_node); -} - -std::pair, std::pair> -RoutingTester::check_distributed_cx(const Swap &nodes) { - return router->check_distributed_cx(nodes); -} - -void RoutingTester::advance_frontier() { router->advance_frontier(); } - -void RoutingTester::set_interaction() { - router->interaction = - 
router->generate_interaction_frontier(router->slice_frontier_); -} -void RoutingTester::set_qmap(qubit_bimap_t _qmap) { router->qmap = _qmap; } -void RoutingTester::set_config(const RoutingConfig &_config) { - router->config_ = _config; -} -void RoutingTester::next_sf(RoutingFrontier &sf) { sf.next_slicefrontier(); } -Circuit *RoutingTester::get_circ() { return &(router->circ_); } - -namespace test_Routing { - -SCENARIO( - "Test validity of circuit against architecture using " - "respects_connectivity_constraints method.", - "[routing]") { - Architecture arc({{1, 0}, {1, 2}}); - - GIVEN("A simple CX circuit and a line_placement map.") { - tket::Circuit circ(5); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {0, 3}, {2, 4}, {1, 4}, {0, 4}}); - tket::Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); - LinePlacement lp_obj(test_arc); - // qubit_mapping_t lm = lp_obj.place_get_maps(circ)[0]; - lp_obj.place(circ); - tket::Routing router(circ, test_arc); - std::pair outcirc = router.solve(); - REQUIRE(outcirc.second == true); - CHECK(respects_connectivity_constraints(outcirc.first, test_arc, false)); - } - GIVEN("A failing case, undirected") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 2}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A working case, undirected") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A failing case, directed") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A working case, directed") { - Circuit circ(3); - circ.add_op(OpType::CX, {1, 0}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A failing case, undirected, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A working case, undirected, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 2}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A failing case, directed, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {1, 0}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A working case, directed, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } -} - -SCENARIO("Test decompose_SWAP_to_CX pass", "[routing]") { - Architecture 
arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 0}}); - GIVEN("A single SWAP gate. Finding if correct number of vertices added") { - Circuit circ(5); - circ.add_op(OpType::SWAP, {0, 1}); - int original_vertices = circ.n_vertices(); - reassign_boundary(circ); - Transform::decompose_SWAP_to_CX().apply(circ); - int decompose_vertices = circ.n_vertices(); - REQUIRE(decompose_vertices - original_vertices == 2); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A single SWAP gate, finding if correct path is preserved.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {0, 1}); - // check output boundary - Vertex boundary_0 = circ.get_out(Qubit(0)); - Vertex boundary_1 = circ.get_out(Qubit(1)); - Transform::decompose_SWAP_to_CX().apply(circ); - REQUIRE(circ.get_out(Qubit(0)) == boundary_0); - REQUIRE(circ.get_out(Qubit(1)) == boundary_1); - // check output boundary is the same - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {0, 1}); - circ.add_op(OpType::CX, {0, 1}); - Transform::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {0, 1}); - circ.add_op(OpType::CX, {1, 0}); - Transform::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite SWAP.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - circ.add_op(OpType::CX, {0, 1}); - Transform::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case, opposite SWAP.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - circ.add_op(OpType::CX, {1, 0}); - Transform::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite SWAP, pre CX.") { - Circuit circ(2); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::SWAP, {1, 0}); - Transform::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[1].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case, opposite SWAP, pre CX.") { - Circuit circ(2); - circ.add_op(OpType::CX, {1, 0}); - circ.add_op(OpType::SWAP, {1, 0}); - Transform::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[1].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case, opposite SWAP, pre CX, directed bool " - "on.") { - Circuit circ(2); - circ.add_op(OpType::CX, {1, 0}); - circ.add_op(OpType::SWAP, 
{1, 0}); - reassign_boundary(circ); - Transform::decompose_SWAP_to_CX(arc).apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[1].get_args() == cor); - } - GIVEN("A circuit that with no CX gates, but with directed architecture.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - reassign_boundary(circ); - Transform::decompose_SWAP_to_CX(arc).apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[0].get_args() == cor); - } - GIVEN( - "A circuit that with no CX gates, but with directed architecture, " - "opposite case.") { - Architecture dummy_arc({{1, 0}}); - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - reassign_boundary(circ); - Transform::decompose_SWAP_to_CX(dummy_arc).apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[0].get_args() == cor); - } - // TEST CIRCUIT - Circuit circ(10); - int count = 0; - for (unsigned x = 0; x < 10; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - count += 2; - if (x % 2) { - add_2qb_gates(circ, OpType::SWAP, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::SWAP, {{y, x}, {y, y + 1}}); - } - } - } - - GIVEN("A network of SWAP gates.") { - int original_vertices = circ.n_vertices(); - std::vector original_boundary; - for (unsigned i = 0; i < circ.n_qubits(); i++) { - original_boundary.push_back(circ.get_out(Qubit(i))); - } - Transform::decompose_SWAP_to_CX().apply(circ); - int decompose_vertices = circ.n_vertices(); - for (unsigned i = 0; i < circ.n_qubits(); i++) { - REQUIRE(original_boundary[i] == circ.get_out(Qubit(i))); - } - REQUIRE(decompose_vertices - original_vertices == 2 * count); - } - GIVEN("A routed network of SWAP gates.") { - SquareGrid grid(2, 5); - Routing router(circ, grid); - std::pair output = router.solve(); - REQUIRE(output.second); - circ = output.first; - Transform::decompose_SWAP_to_CX().apply(circ); - REQUIRE(respects_connectivity_constraints(circ, grid, false, true)); - GIVEN("Directed CX gates") { - Transform::decompose_SWAP_to_CX().apply(output.first); - Transform::decompose_BRIDGE_to_CX().apply(output.first); - Transform::decompose_CX_directed(grid).apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, grid, true)); - } - } -} - -SCENARIO("Test redirect_CX_gates pass", "[routing]") { - Architecture arc({{1, 0}, {1, 2}}); - GIVEN("A circuit that requires no redirection.") { - Circuit circ(3); - add_2qb_gates(circ, OpType::CX, {{1, 0}, {1, 2}}); - reassign_boundary(circ); - Transform::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A circuit that requires redirection.") { - Circuit circ(3); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {2, 1}}); - reassign_boundary(circ); - Transform::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A circuit that requires no redirection, with SWAP.") { - Circuit circ(3); - - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 1}); - - swap_v = circ.add_op(OpType::SWAP, {0, 2}); - swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - 
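    // --- Editor's note (illustrative, not part of the original patch) --------
    // Context for the directed-architecture checks in these scenarios: a CX
    // that runs against the direction of an edge can be replaced using the
    // standard identity  CX(t, c) = (H ⊗ H) · CX(c, t) · (H ⊗ H),  so after
    // decompose_CX_directed every remaining CX follows a directed edge, which
    // is what respects_connectivity_constraints(circ, arc, true) verifies.
    // ---------------------------------------------------------------------------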
circ.add_op(OpType::CX, {2, 1}); - reassign_boundary(circ); - Transform::decompose_SWAP_to_CX(arc).apply(circ); - Transform::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A circuit that requires redirection, with SWAP.") { - Circuit circ(3); - - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {1, 0}); - - swap_v = circ.add_op(OpType::SWAP, {0, 2}); - swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {1, 2}); - - reassign_boundary(circ); - Transform::decompose_SWAP_to_CX(arc).apply(circ); - Transform::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A complicated circuit of CX gates, routed.") { - Circuit circ(12); - SquareGrid grid(3, 4); - - for (unsigned x = 0; x < 12; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - Routing route(circ, grid); - std::pair outs = route.solve(); - REQUIRE(outs.second == true); - circ = outs.first; - Transform::decompose_BRIDGE_to_CX().apply(circ); - Transform::decompose_SWAP_to_CX(arc).apply(circ); - Transform::decompose_CX_directed(grid).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, grid, true)); - } -} - -SCENARIO("Test RoutingFrontiers and interaction vectors", "[routing]") { - GIVEN("A simple circuit") { - Circuit incirc(4); - Vertex v1 = incirc.add_op(OpType::X, {0}); - Vertex v8 = incirc.add_op(OpType::S, {3}); - Vertex v9 = incirc.add_op(OpType::T, {3}); - Vertex v2 = incirc.add_op(OpType::CX, {0, 1}); - Vertex v3 = incirc.add_op(OpType::CY, {2, 3}); - Vertex v4 = incirc.add_op(OpType::H, {0}); - Vertex v10 = incirc.add_op(OpType::X, {0}); - Vertex v11 = incirc.add_op(OpType::S, {1}); - Vertex v12 = incirc.add_op(OpType::Z, {3}); - Vertex v13 = incirc.add_op(OpType::Y, {2}); - Vertex v14 = incirc.add_op(OpType::T, {1}); - Vertex v5 = incirc.add_op(OpType::CZ, {0, 2}); - Vertex v6 = incirc.add_op(OpType::Y, {0}); - Vertex v7 = incirc.add_op(OpType::CX, {3, 1}); - - // Ring of size 4 - RingArch arc(4); - node_vector_t ring_nodes = arc.get_all_nodes_vec(); - // Create Routing Object - Routing router(incirc, arc); - RoutingTester tester(&router); - Circuit *circ = tester.get_circ(); - RoutingFrontier sf1 = router.get_slicefrontier(); - Qubit qb0(0); - Qubit qb1(1); - Qubit qb2(2); - Qubit qb3(3); - qubit_bimap_t qm; - for (unsigned i = 0; i < 4; ++i) { - qm.insert({Qubit(i), ring_nodes[i]}); - } - tester.set_qmap(qm); - WHEN("First interaction vector is generated") { - Interactions inte = tester.get_interaction(sf1); - THEN("Interaction vector is correct") { - CHECK(inte[ring_nodes.at(0)] == ring_nodes.at(1)); - CHECK(inte[ring_nodes.at(1)] == ring_nodes.at(0)); - CHECK(inte[ring_nodes.at(3)] == ring_nodes.at(2)); - CHECK(inte[ring_nodes.at(2)] == ring_nodes.at(3)); - REQUIRE(inte.size() == 4); - } - } - WHEN("One operation is completed") { - Edge new_0 = circ->skip_irrelevant_edges(circ->get_all_out_edges(v2)[0]); - Edge new_1 = circ->skip_irrelevant_edges(circ->get_all_out_edges(v2)[1]); - sf1.quantum_in_edges->replace( - sf1.quantum_in_edges->find(qb0), {qb0, new_0}); - 
sf1.quantum_in_edges->replace( - sf1.quantum_in_edges->find(qb1), {qb1, new_1}); - CutFrontier next_cut = circ->next_cut( - sf1.quantum_in_edges, std::make_shared()); - - sf1.slice = next_cut.slice; - sf1.quantum_out_edges = next_cut.u_frontier; - Interactions inte = tester.get_interaction(sf1); - THEN("Interaction vector is updated") { - CHECK(inte[ring_nodes.at(0)] == ring_nodes.at(0)); - CHECK(inte[ring_nodes.at(1)] == ring_nodes.at(1)); - CHECK(inte[ring_nodes.at(3)] == ring_nodes.at(2)); - CHECK(inte[ring_nodes.at(2)] == ring_nodes.at(3)); - REQUIRE(inte.size() == 4); - } - } - - WHEN("Next RoutingFrontier is generated") { - sf1.next_slicefrontier(); - THEN("The RoutingFrontier is correct") { - REQUIRE(sf1.slice->size() == 2); - CHECK( - circ->get_Op_ptr_from_Vertex(sf1.slice->at(0)) == - incirc.get_Op_ptr_from_Vertex(v5)); - CHECK( - circ->get_Op_ptr_from_Vertex(sf1.slice->at(1)) == - incirc.get_Op_ptr_from_Vertex(v7)); - - CHECK( - sf1.quantum_in_edges->find(qb1)->second != - circ->get_nth_out_edge(v2, 1)); - CHECK( - sf1.quantum_in_edges->find(qb2)->second == - circ->get_nth_in_edge(sf1.slice->at(0), 1)); - - CHECK( - sf1.quantum_out_edges->find(qb0)->second != - circ->get_nth_in_edge(v6, 0)); - CHECK( - sf1.quantum_out_edges->find(qb3)->second == - circ->get_nth_out_edge(sf1.slice->at(1), 0)); - } - sf1.next_slicefrontier(); - REQUIRE(sf1.slice->empty()); - } - } -} - -SCENARIO( - "Check that an already solved routing problem will not add unecessary " - "swaps", - "[routing]") { - GIVEN("A solved problem") { - // Test Circuit, sequential cxs on a ring, requires no routing - Circuit test_circuit; - test_circuit.add_blank_wires(4); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); - - // Ring of size 4 - RingArch arc(4); - // Create Routing Object - Routing router(test_circuit, arc); - std::pair post_c = router.solve(); - REQUIRE(post_c.second == true); - REQUIRE(post_c.first.n_gates() == 4); - } - GIVEN("A solved problem supplied with map and custom architecture") { - Circuit test_circuit; - test_circuit.add_blank_wires(4); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); - - Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 0}}); - Placement test_p(test_arc); - - qubit_mapping_t map_; - for (unsigned nn = 0; nn <= 3; ++nn) { - map_[Qubit(nn)] = Node(nn); - } - test_p.place_with_map(test_circuit, map_); - qubit_vector_t all_qs_post_place = test_circuit.all_qubits(); - Routing router(test_circuit, test_arc); - std::pair result = router.solve(); - qubit_vector_t all_qs_post_solve = test_circuit.all_qubits(); - - REQUIRE(all_qs_post_place == all_qs_post_solve); - REQUIRE(result.second == false); - REQUIRE(result.first.n_gates() == 4); - } -} - -SCENARIO( - "If a circuit has fewer qubits than the architecture has nodes, is a " - "correct sub-architecture made", - "[routing]") { - GIVEN("A circuit and architecture obeying said scenario") { - // 5 wires, all used - Circuit test_circuit(5); - add_2qb_gates(test_circuit, OpType::CX, {{0, 4}, {2, 3}, {1, 4}}); - - SquareGrid arc(3, 3); - Routing route(test_circuit, arc); - route.solve(); - node_vector_t nodes = route.get_active_nodes(); - - REQUIRE(nodes.size() == 5); - - // 5 wires, 4 used - Circuit test_circuit2(5); - add_2qb_gates(test_circuit2, OpType::CX, {{0, 3}, {1, 2}}); - - Routing route2(test_circuit2, arc); - route2.solve(); - node_vector_t nodes2 = route.get_active_nodes(); - - REQUIRE(nodes2.size() == 5); - } -} - -SCENARIO("Qubit activating edge case", "[routing]") { - 
GIVEN("A node line with only 3 qubits line placed") { - Circuit circ; - circ.add_blank_wires(4); - add_2qb_gates( - circ, OpType::CX, {{1, 0}, {2, 0}, {2, 1}, {3, 0}, {3, 1}, {3, 2}}); - circ.add_op(OpType::CU1, 0.5, {1, 0}); - circ.add_op(OpType::CU1, 0.25, {2, 0}); - circ.add_op(OpType::CU1, 0.5, {2, 1}); - circ.add_op(OpType::CU1, 0.125, {3, 0}); - circ.add_op(OpType::CU1, 0.25, {3, 1}); - circ.add_op(OpType::CU1, 0.5, {3, 2}); - Transform::rebase_tket().apply(circ); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair c = router.solve(); - REQUIRE(respects_connectivity_constraints(c.first, arc, false, true)); - REQUIRE(c.second); - } -} - -SCENARIO("Empty Circuit test", "[routing]") { - GIVEN("An Empty Circuit") { - Circuit circ; - circ.add_blank_wires(4); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.first.n_gates() == 0); - REQUIRE(result.second == true); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } -} - -SCENARIO("Routing on circuit with no multi-qubit gates", "[routing]") { - GIVEN("A circuit with no multi-qubit gates") { - Circuit circ; - circ.add_blank_wires(4); - add_1qb_gates(circ, OpType::X, {0, 2}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::Y, {1}); - // circ.add_op(OpType::Y,{3}); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(circ.n_vertices() - 8 == result.first.n_gates()); - REQUIRE(result.second == true); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } -} - -SCENARIO("Test routing for other multi-qubit ops", "[routing]") { - GIVEN("Failed qft circuit") { - Circuit circ(4, 4); - add_1qb_gates(circ, OpType::X, {0, 2}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::CU1, 0.5, {1, 0}); - circ.add_op(OpType::CU1, 0.5, {0, 1}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CU1, 0.25, {2, 0}); - circ.add_op(OpType::CU1, 0.5, {2, 1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::CU1, 0.125, {3, 0}); - circ.add_op(OpType::CU1, 0.25, {3, 1}); - circ.add_op(OpType::CU1, 0.5, {3, 2}); - circ.add_op(OpType::H, {3}); - for (unsigned nn = 0; nn <= 3; ++nn) { - circ.add_measure(nn, nn); - } - Transform::rebase_tket().apply(circ); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair result = router.solve(); - - REQUIRE(respects_connectivity_constraints(result.first, arc, false, true)); - REQUIRE(result.second); - } -} - -SCENARIO( - "Test routing on a directed architecture with bidirectional edges", - "[routing]") { - GIVEN("A simple two-qubit circuit") { - Circuit circ(2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::CX, {0, 1}); - Architecture arc({{0, 1}, {1, 0}}); - Architecture arc2(std::vector>{{0, 1}}); - - // routing ignored bi directional edge and solves correctly - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.first.n_gates() == 2); - CHECK(respects_connectivity_constraints(result.first, arc, false)); - REQUIRE(result.second == true); - } -} - -SCENARIO( - "Test routing on a directed architecture doesn't throw an error if " - "non-cx optype is presented", - "[routing]") { - GIVEN( - "A simple two-qubit circuit with non-cx multi-qubit gates and a " - "directed architecture") { - Circuit circ(2); - circ.add_op(OpType::CU1, 0.5, {1, 0}); - circ.add_op(OpType::CU1, 0.5, {0, 1}); - circ.add_op(OpType::CY, {1, 0}); - 
circ.add_op(OpType::CY, {0, 1}); - circ.add_op(OpType::CZ, {1, 0}); - circ.add_op(OpType::CZ, {0, 1}); - circ.add_op(OpType::CRz, 0.5, {1, 0}); - circ.add_op(OpType::CRz, 0.5, {0, 1}); - - Architecture arc(std::vector>{{0, 1}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second == true); - REQUIRE(result.first.n_gates() == 8); - } -} - -SCENARIO("Dense CX circuits route succesfully", "[routing]") { - GIVEN( - "Complex CX circuits for large directed architecture based off " - "IBMTokyo") { - Circuit circ(20); - for (unsigned x = 0; x < 17; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { // swap the way directed chain runs each time - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - Architecture arc( - {{0, 1}, {1, 2}, {2, 3}, {3, 4}, {0, 5}, {1, 6}, {1, 7}, - {2, 6}, {2, 7}, {3, 8}, {3, 9}, {4, 8}, {4, 9}, {5, 6}, - {5, 10}, {5, 11}, {6, 10}, {6, 11}, {6, 7}, {7, 12}, {7, 13}, - {7, 8}, {8, 12}, {8, 13}, {8, 9}, {10, 11}, {11, 16}, {11, 17}, - {11, 12}, {12, 16}, {12, 17}, {12, 13}, {13, 18}, {13, 19}, {13, 14}, - {14, 18}, {14, 19}, {15, 16}, {16, 17}, {17, 18}, {18, 19}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second); - (Transform::decompose_SWAP_to_CX() >> Transform::decompose_BRIDGE_to_CX()) - .apply(result.first); - Transform::decompose_CX_directed(arc).apply(result.first); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } -} - -SCENARIO( - "Dense CX circuits route succesfully on undirected Ring with " - "placement.", - "[routing]") { - GIVEN("Complex CX circuits, big ring") { - Circuit circ(29); - for (unsigned x = 0; x < 29; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - RingArch arc(29); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second); - Transform::decompose_SWAP_to_CX().apply(result.first); - REQUIRE(respects_connectivity_constraints(result.first, arc, false, true)); - } -} - -SCENARIO( - "Dense CX circuits route succesfully on smart placement unfriendly " - "architecture.", - "[routing]") { - GIVEN("Complex CX circuits, big ring") { - Circuit circ(13); - for (unsigned x = 0; x < 13; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - Architecture arc( - {{0, 1}, - {2, 0}, - {2, 4}, - {6, 4}, - {8, 6}, - {8, 10}, - {12, 10}, - {3, 1}, - {3, 5}, - {7, 5}, - {7, 9}, - {11, 9}, - {11, 13}, - {12, 13}, - {6, 7}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second); - REQUIRE(respects_connectivity_constraints(result.first, arc, false, true)); - } -} - -SCENARIO("Empty circuits, with and without blank wires", "[routing]") { - GIVEN("An empty circuit with some qubits") { - Circuit circ(6); - RingArch arc(6); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.first.depth() == 0); - REQUIRE(result.first.n_gates() == 0); - REQUIRE(result.first.n_qubits() == 6); - REQUIRE(result.second == true); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } - GIVEN("An empty circuit with no qubits") { - Circuit circ(0); - RingArch arc(6); - Routing 
router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second == false); - REQUIRE(result.first.depth() == 0); - REQUIRE(result.first.n_gates() == 0); - REQUIRE(result.first.n_qubits() == 0); - } - - GIVEN("An empty circuit with no qubits, and empty architecture") { - Circuit circ(0); - std::vector> cons = {}; - Architecture arc(cons); - REQUIRE_THROWS_AS( - [&]() { Routing router(circ, arc); }(), ArchitectureMismatch); - } - GIVEN("An a mismatch") { - Circuit circ(5); - RingArch arc(4); - REQUIRE_THROWS_AS( - [&]() { Routing router(circ, arc); }(), ArchitectureMismatch); - } -} - -/* METHODS TO COVER IN TESTING: */ -/* Routing class: */ - -// Routing::increment_distance -SCENARIO("Does increment distance work?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - SquareGrid test_architecture(2, 3); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - ; - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("Suitable Distance vector, Swap and increment.") { - unsigned diameter = test_architecture.get_diameter(); - graphs::dist_vec test_distance(diameter, 2); - Swap test_swap = {square_nodes[0], square_nodes[1]}; - int increment = 2; - unsigned distance_index = diameter - test_architecture.get_distance( - test_swap.first, test_swap.second); - int pre_increment_val = test_distance[distance_index]; - routing_tester.increment_distance(test_distance, test_swap, increment); - REQUIRE(pre_increment_val + increment == test_distance[distance_index]); - } - GIVEN("Realistic Distance Vector, non_adjacent Swap, absurd increment.") { - unsigned diameter = test_architecture.get_diameter(); - graphs::dist_vec test_distance(diameter, 2); - Swap test_swap = {square_nodes[0], square_nodes[5]}; - int increment = 30; - unsigned distance_index = diameter - test_architecture.get_distance( - test_swap.first, test_swap.second); - int pre_increment_val = test_distance[distance_index]; - routing_tester.increment_distance(test_distance, test_swap, increment); - REQUIRE(pre_increment_val + increment == test_distance[distance_index]); - } -} - -// Routing::generate_distance_vector -SCENARIO("Does generate_distance_vector work suitably?", "[routing]") { - GIVEN("A realistic small interaction vector and architecture") { - // Creating RoutingTester object - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - - std::array inte_pattern = {1, 0, 5, 3, 4, 2}; - Interactions test_interaction; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - // no placement invoked, should be 0 at diameter distance, 1 at distance 2, - // 1 at distance 1. i.e. 
{0,2} - graphs::dist_vec out_distances = - routing_tester.generate_distance_vector(test_interaction); - REQUIRE( - out_distances[0] == - 0); // 0 entries at distance diameter away from each other - REQUIRE( - out_distances[1] == - 2); // 2 entries at distance diameter - 1 away from each other - } - GIVEN("A realistic large interaction vector and architecture") { - // Creating larger RoutingTester object - Circuit test_circuit(10); - SquareGrid test_architecture(2, 5); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 -- 2 -- 3 -- 4 - // | | | | | - // 5 -- 6 -- 7 -- 8 -- 9 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - // Expected distances: - // 9-0 -> 5 - // 8-1 -> 3 - // 7-2 -> 1 - // 6-3 -> 3 - // 5-4 -> 5 - // i.e. - graphs::dist_vec expected_distances = { - 4, 0, 4, 0}; // 4 qubits at diameter, 0 at diameter-1, 4 qubits at - // diameter-2, 0 at diameter-3 - graphs::dist_vec out_distances = - routing_tester.generate_distance_vector(test_interaction); - REQUIRE(out_distances == expected_distances); - } -} - -// Routing::update_distance_vector -SCENARIO("Does update_distance_vector update as intended?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - // update_distance_vector is four indiviudal increment_distances - GIVEN("Realistic Distance vector, Swap and Interaction vector.") { - unsigned diameter = test_architecture.get_diameter(); - graphs::dist_vec test_distance = {0, 2}; - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{1, 0, 5, 3, 4, 2}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - graphs::dist_vec quick_compare_distance = - routing_tester.generate_distance_vector(test_interaction); - REQUIRE(quick_compare_distance == test_distance); - - Swap test_swap = {square_nodes[2], square_nodes[4]}; - - // Distances from full method - graphs::dist_vec out_distance = routing_tester.update_distance_vector( - test_swap, test_distance, test_interaction); - // Forming Distances from individual steps: - // (1) 2 in test_swap is interacting with qubit 5, a distance of 2 away - // this swap brings the two qubits adjacent - unsigned distance_index_1 = - diameter - test_architecture.get_distance( - test_swap.first, test_interaction[test_swap.first]); - int pre_increment_val_1 = test_distance[distance_index_1]; - routing_tester.increment_distance( - test_distance, {test_swap.first, test_interaction[test_swap.first]}, - -2); - REQUIRE(pre_increment_val_1 - 2 == test_distance[distance_index_1]); - // (2), 4 in test_swap is not interacting, test_distances won't change - REQUIRE( - test_architecture.get_distance( - test_swap.second, test_interaction[test_swap.second]) == 0); - // (3), 4 in test_swap and the qubit 2 is interacting with 5 are adjacent, - // test_distances won't change - REQUIRE( - test_architecture.get_distance( - test_swap.second, 
test_interaction[test_swap.first]) == 1); - // (4), 2 in test swap and the qubit 4 is interacting with 0 are adjacent, - // test_distances won't change - REQUIRE( - test_architecture.get_distance( - test_swap.first, test_interaction[test_swap.second]) == 1); - - REQUIRE(out_distance[0] == test_distance[0]); - REQUIRE(out_distance[1] == test_distance[1]); - } -} -// Routing::pair_dists -SCENARIO( - "Does pair_dists return the correct distances, in the correct order?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN( - "Realistic architecture nodes. Distance between pair_1 less than " - "between pair_2.") { - std::pair pair_1 = {square_nodes[0], square_nodes[3]}; - std::pair pair_2 = {square_nodes[1], square_nodes[4]}; - unsigned dist_1 = - test_architecture.get_distance(pair_1.first, pair_1.second); - REQUIRE(dist_1 == 2); - unsigned dist_2 = - test_architecture.get_distance(pair_2.first, pair_2.second); - REQUIRE(dist_2 == 3); - std::pair pair_dists_results = - routing_tester.pair_dists( - pair_1.first, pair_1.second, pair_2.first, pair_2.second); - REQUIRE(pair_dists_results.first == dist_2); - REQUIRE(pair_dists_results.second == dist_1); - } - GIVEN( - "Realistic architecture nodes. Distance between pair_1 greater than " - "between pair_2.") { - std::pair pair_1 = {square_nodes[4], square_nodes[3]}; - std::pair pair_2 = {square_nodes[0], square_nodes[2]}; - unsigned dist_1 = - test_architecture.get_distance(pair_1.first, pair_1.second); - REQUIRE(dist_1 == 2); - unsigned dist_2 = - test_architecture.get_distance(pair_2.first, pair_2.second); - REQUIRE(dist_2 == 1); - std::pair pair_dists_results = - routing_tester.pair_dists( - pair_1.first, pair_1.second, pair_2.first, pair_2.second); - REQUIRE(pair_dists_results.first == dist_1); - REQUIRE(pair_dists_results.second == dist_2); - } -} - -// Routing::swap_decreases -SCENARIO( - "Does swap_decreases correctly determine between two placements?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("A swap that improves placement for given interaction vector.") { - // only nodes 0 and 5 have an interacting pair of qubits between them - Interactions test_interaction; - unsigned ind = 0; - std::array inte_pattern{5, 1, 2, 3, 4, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - Swap test_swap = {square_nodes[0], square_nodes[2]}; - Swap test_swap_interactions = {square_nodes[5], square_nodes[2]}; - // Confirm swap_decreases functions as expected - REQUIRE(routing_tester.swap_decreases(test_swap, test_interaction) == true); - // Confirm working elements of swap_decreases does also - unsigned dist_1 = test_architecture.get_distance( - test_swap.first, test_swap_interactions.first); - REQUIRE(dist_1 == 3); - unsigned dist_2 = test_architecture.get_distance( - test_swap.second, test_swap_interactions.second); - REQUIRE(dist_2 == 0); - unsigned dist_3 = 
test_architecture.get_distance( - test_swap.second, test_swap_interactions.first); - REQUIRE(dist_3 == 2); - unsigned dist_4 = test_architecture.get_distance( - test_swap.first, test_swap_interactions.second); - REQUIRE(dist_4 == 1); - - std::pair old_dists = {dist_1, dist_2}; - std::pair new_dists = {dist_3, dist_4}; - REQUIRE(new_dists < old_dists); - } - GIVEN("A swap containing non-interacting nodes.") { - unsigned ind = 0; - Interactions test_interaction; - // only nodes 0 and 5 have an interacting pair of qubits between them - std::array inte_pattern{5, 1, 2, 3, 4, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - Swap test_swap = {square_nodes[1], square_nodes[3]}; - Swap test_swap_interactions = {square_nodes[1], square_nodes[3]}; - // Confirm swap_decreases functions as expected - REQUIRE( - routing_tester.swap_decreases(test_swap, test_interaction) == false); - } -} - -// Routing::candidate_swaps -SCENARIO("Does candidate swaps return all suitable edges?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - std::vector test_arc = test_architecture.get_all_edges_vec(); - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("One pair of interacting qubits, four suitable edges between them.") { - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{3, 1, 2, 0, 4, 5}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector correct_swaps = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, - {square_nodes[2], square_nodes[3]}}; - std::vector test_swaps = - routing_tester.candidate_swaps(test_arc, test_interaction); - REQUIRE(test_swaps.size() == 4); - REQUIRE(test_swaps == correct_swaps); - } - GIVEN("A case wherein no edges are suitable.") { - unsigned ind = 0; - Interactions test_interaction; - // easiest to replicate this case by making all interactions adjacent - std::array inte_pattern{1, 0, 3, 2, 5, 4}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector test_swaps = - routing_tester.candidate_swaps(test_arc, test_interaction); - REQUIRE(test_swaps.size() == 0); - } - GIVEN( - "A case with all qubits interacting, 5 suitable edges between " - "them.") { - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{5, 2, 1, 4, 3, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector correct_swaps = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[2], square_nodes[3]}, - {square_nodes[3], square_nodes[5]}, - {square_nodes[4], square_nodes[5]}}; - std::vector test_swaps = - routing_tester.candidate_swaps(test_arc, test_interaction); - REQUIRE(test_swaps.size() == 5); - REQUIRE(test_swaps == correct_swaps); - } -} - -// Routing::cowtan_et_al_heuristic -SCENARIO( - "Does implementation of heuristic outlined in paper work as expected?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - 
SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("One pair of interacting qubits, four suitable swap gates.") { - std::vector test_swaps = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, - {square_nodes[2], square_nodes[3]}, - {square_nodes[3], square_nodes[5]}}; - graphs::dist_vec test_distances = {0, 2}; - Interactions test_interaction; - unsigned ind = 0; - std::array inte_pattern{3, 1, 2, 0, 4, 5}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector output_swaps = routing_tester.cowtan_et_al_heuristic( - test_swaps, test_distances, test_interaction); - std::vector expected_output = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, - {square_nodes[2], square_nodes[3]}}; - REQUIRE(output_swaps == expected_output); - } - GIVEN("Two pairs of interacting qubits, two suitable swap gates.") { - std::vector test_swaps = { - {square_nodes[0], square_nodes[1]}, {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, {square_nodes[2], square_nodes[3]}, - {square_nodes[2], square_nodes[4]}, {square_nodes[3], square_nodes[5]}, - {square_nodes[4], square_nodes[5]}}; - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{3, 4, 2, 0, 1, 5}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - graphs::dist_vec test_distances = {2, 2}; - std::vector output_swaps = routing_tester.cowtan_et_al_heuristic( - test_swaps, test_distances, test_interaction); - std::vector expected_output = { - {square_nodes[0], square_nodes[1]}, {square_nodes[1], square_nodes[3]}}; - REQUIRE(output_swaps == expected_output); - } -} - -// Routing::update_qmap -SCENARIO("Does update qmap correctly update mapping from swap?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(2); - RingArch test_architecture(2); - node_vector_t ring_nodes = test_architecture.get_all_nodes_vec(); - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - Qubit qb0(0); - Qubit qb1(1); - qubit_bimap_t test_map; - test_map.left.insert({qb0, ring_nodes[0]}); - test_map.left.insert({qb1, ring_nodes[1]}); - - routing_tester.update_qmap(test_map, {ring_nodes[0], ring_nodes[1]}); - REQUIRE(test_map.right.at(ring_nodes[0]) == qb1); - REQUIRE(test_map.right.at(ring_nodes[1]) == qb0); -} - -// Routing::solve_furthest interior functions -SCENARIO( - "Do solve_furthest interior methods find and swap along the expected " - "path?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - - unsigned node0, node1; - node_vector_t expected_path; - std::vector expected_swaps; - GIVEN("An expected path with an even number of nodes.") { - node0 = 0, node1 = 5; - expected_path = { - 
square_nodes[5], square_nodes[3], square_nodes[1], square_nodes[0]}; - expected_swaps = { - {square_nodes[5], square_nodes[3]}, {square_nodes[3], square_nodes[1]}}; - } - GIVEN("An expected path with an odd number of nodes.") { - node0 = 0, node1 = 3; - expected_path = {square_nodes[3], square_nodes[1], square_nodes[0]}; - expected_swaps = {{square_nodes[3], square_nodes[1]}}; - } - GIVEN("An adjacent path doesn't fail.") { - node0 = 0, node1 = 1; - expected_path = {square_nodes[1], square_nodes[0]}; - expected_swaps = {}; - } - // Collect path from architecture - const node_vector_t test_path = - test_architecture.get_path(square_nodes[node0], square_nodes[node1]); - REQUIRE(test_path == expected_path); - qubit_bimap_t test_map; - for (unsigned i = 0; i < 6; i++) { - test_map.left.insert({Qubit(i), square_nodes[i]}); - } - routing_tester.set_qmap(test_map); - const std::vector path_swaps = routing_tester.path_to_swaps(test_path); - REQUIRE(path_swaps == expected_swaps); -} - -// generate_test_interaction_graph and qubit_lines -SCENARIO("Test interaction graph and line generation", "[routing]") { - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - - GIVEN("A small test circuit with 1 layer, all qubits in 2qb gates.") { - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - QubitGraph test_qubit_graph = generate_interaction_graph(test_circuit); - - REQUIRE(test_qubit_graph.n_connections() == 3); - REQUIRE(test_qubit_graph.edge_exists(Qubit(0), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(3))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(4), Qubit(5))); - - QubitLineList qlines = qubit_lines(test_circuit); - QubitLineList correct_lines = { - {Qubit(0), Qubit(1)}, {Qubit(2), Qubit(3)}, {Qubit(4), Qubit(5)}}; - REQUIRE(qlines == correct_lines); - } - GIVEN("A small test circuit with 1 layer, not all qubits in 2qb gates.") { - Circuit test_circuit(6); - test_circuit.add_op(OpType::CX, {0, 1}); - test_circuit.add_op(OpType::H, {5}); - test_circuit.add_op(OpType::H, {3}); - test_circuit.add_op(OpType::CX, {2, 4}); - - QubitGraph test_qubit_graph = generate_interaction_graph(test_circuit); - - REQUIRE(test_qubit_graph.n_connections() == 2); - REQUIRE(test_qubit_graph.edge_exists(Qubit(0), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(4))); - - QubitLineList qlines = qubit_lines(test_circuit); - QubitLineList correct_lines = { - {Qubit(0), Qubit(1)}, - {Qubit(2), Qubit(4)}, - {Qubit(3)}, - {Qubit(5)}}; // It is not guaranteed to match qubit numbers as qubits - // are not unsigneds - REQUIRE(qlines == correct_lines); - } - GIVEN("A small test circuit with 2 layers.") { - Circuit test_circuit(6); - add_2qb_gates( - test_circuit, OpType::CX, - {{0, 1}, {2, 3}, {4, 5}, {2, 1}, {4, 3}, {5, 1}}); - - QubitGraph test_qubit_graph = generate_interaction_graph(test_circuit); - - REQUIRE(test_qubit_graph.n_connections() == 5); - REQUIRE(test_qubit_graph.edge_exists(Qubit(0), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(3))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(4), Qubit(5))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(4), Qubit(3))); - - QubitLineList qlines = qubit_lines(test_circuit); - QubitLineList correct_lines = { - {Qubit(0), Qubit(1), Qubit(2), Qubit(3), Qubit(4), Qubit(5)}}; - REQUIRE(qlines == correct_lines); - } -} - -// solve_with_map -SCENARIO("Test routing with partial map provided", "[routing]") { - GIVEN("A 
partial map where no node should be removed.") { - Circuit circ(3); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::CX, {0, 2}); - Architecture arc({{0, 1}, {1, 2}}); - - // force a partial map which requires unused node to solve - Placement pl(arc); - qubit_mapping_t map_ = {{Qubit(0), Node(0)}, {Qubit(2), Node(2)}}; - pl.place_with_map(circ, map_); - Routing router(circ, arc); - std::pair result = router.solve(); - - REQUIRE(result.second == true); - // check solution is valid and respects map - std::vector test_coms = result.first.get_commands(); - REQUIRE(test_coms.size() == 3); - bool oph = (*test_coms[0].get_op_ptr() == *get_op_ptr(OpType::H)); - oph &= (test_coms[0].get_args()[0] == Node(0)); - REQUIRE(oph); - REQUIRE(*test_coms[1].get_op_ptr() == *get_op_ptr(OpType::SWAP)); - REQUIRE(*test_coms[2].get_op_ptr() == *get_op_ptr(OpType::CX)); - } - - GIVEN("A mapped set of nodes") { - Circuit circ(4); - Qubit qb0(0); - Qubit qb1(1); - Qubit qb2(2); - Qubit qb3(3); - // test removal only happpens if subgraph remains connected - SquareGrid test_architecture(3, 2); - Architecture subarc = test_architecture; - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - // subarc = {0, 1, 3} - subarc.remove_node(square_nodes[5]); - subarc.remove_node(square_nodes[4]); - subarc.remove_node(square_nodes[3]); - REQUIRE(subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[3])); - REQUIRE(!subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[1])); - REQUIRE(subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[4])); - REQUIRE(subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[5])); - REQUIRE(test_architecture.n_connections() == 2); - - SquareGrid test_architecture2(3, 2); - qubit_bimap_t map; - map.left.insert({qb0, square_nodes[0]}); - map.left.insert({qb1, square_nodes[1]}); - map.left.insert({qb2, square_nodes[2]}); - - remove_unmapped_nodes(test_architecture2, map, circ); - REQUIRE(test_architecture2.n_connections() == 2); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[1])); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[2])); - - // test unmapped nodes which cannot be removed are mapped to a qubit - map.left.erase(qb0); - - remove_unmapped_nodes(test_architecture2, map, circ); - REQUIRE(map.left.find(qb0)->second == square_nodes[0]); - REQUIRE(test_architecture2.n_connections() == 2); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[1])); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[2])); - - // test when an unmapped node is mapped, the most connected is chosen - // (i.e. least connected nodes are removed first) - Architecture test_architecture3({{0, 1}, {0, 2}, {1, 3}, {2, 3}, {2, 4}}); - // 0 -- 1 - // | | - // 2 -- 3 - // | - // 4 - - qubit_bimap_t map2; - map2.left.insert({qb0, Node(0)}); - map2.left.insert({qb3, Node(3)}); - remove_unmapped_nodes(test_architecture3, map2, circ); - REQUIRE(map2.right.find(Node(2))->second == qb1); - bool no_4 = map2.right.find(Node(4)) == map2.right.end(); - REQUIRE(no_4); - } -} - -// Every command in the circuit with a specified optype -// must have a specified single qubit argument. 
-static void require_arguments_for_specified_commands( - const Circuit &circ, const std::map &the_map) { - for (Command com : circ) { - const auto type = com.get_op_ptr()->get_type(); - const auto citer = the_map.find(type); - if (citer != the_map.cend()) { - unit_vector_t comp = {citer->second}; - REQUIRE(com.get_args() == comp); - } - } -} - -SCENARIO( - "Does shifting single qubit gates through SWAP gates to get find nodes " - "with better fidelity work?", - "[routing]") { - Architecture arc({{0, 1}, {1, 2}}); - gate_error_t ge_0(0.3); - gate_error_t ge_1(0.2); - gate_error_t ge_2(0.1); - op_node_errors_t nec; - op_errors_t gec_0({{OpType::H, ge_0}, {OpType::X, ge_1}}); - op_errors_t gec_1({{OpType::H, ge_1}, {OpType::X, ge_2}}); - op_errors_t gec_2({{OpType::H, ge_2}, {OpType::X, ge_0}}); - - nec.insert({Node(2), gec_2}); - nec.insert({Node(0), gec_0}); - nec.insert({Node(1), gec_1}); - - GIVEN( - "A simple two qubit circuit with clear difference between node " - "fidelities") { - Circuit circ(2); - add_1qb_gates(circ, OpType::H, {0, 1}); - circ.add_op(OpType::SWAP, {0, 1}); - reassign_boundary(circ); - Transform::commute_SQ_gates_through_SWAPS(nec).apply(circ); - require_arguments_for_specified_commands( - circ, {{OpType::H, circ.all_qubits().at(1)}}); - } - GIVEN( - "A simple two qubit circuit with multiple single qubit operations " - "requiring movement before a SWAP.") { - Circuit circ(2); - add_1qb_gates(circ, OpType::H, {0, 0, 0, 1}); - circ.add_op(OpType::SWAP, {0, 1}); - reassign_boundary(circ); - Transform::commute_SQ_gates_through_SWAPS(nec).apply(circ); - require_arguments_for_specified_commands( - circ, {{OpType::H, circ.all_qubits().at(1)}}); - } - GIVEN("Multiple SWAP gates, multiple single qubit gates.") { - Circuit circ(3); - add_1qb_gates(circ, OpType::H, {0, 0, 0, 1}); - add_2qb_gates(circ, OpType::SWAP, {{0, 1}, {1, 2}, {0, 1}, {1, 2}, {1, 2}}); - - reassign_boundary(circ); - Transform::commute_SQ_gates_through_SWAPS(nec).apply(circ); - require_arguments_for_specified_commands( - circ, {{OpType::H, circ.all_qubits().at(2)}}); - } - GIVEN( - "Multiple SWAP gates, multiple single qubit gates of various " - "OpType.") { - Circuit circ(3); - add_1qb_gates(circ, OpType::X, {0, 0, 1, 1}); - add_1qb_gates(circ, OpType::H, {0, 0, 0, 1}); - add_2qb_gates(circ, OpType::SWAP, {{0, 1}, {1, 2}, {0, 1}, {1, 2}, {1, 2}}); - - reassign_boundary(circ); - Transform::commute_SQ_gates_through_SWAPS(nec).apply(circ); - const qubit_vector_t qbs = circ.all_qubits(); - require_arguments_for_specified_commands( - circ, {{OpType::H, qbs.at(2)}, {OpType::X, qbs.at(1)}}); - } - GIVEN( - "A large circuit of CX gates, H gates and X gates, routed and " - "shifted.") { - Circuit circ(9); - for (unsigned x = 0; x < circ.n_qubits(); ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - circ.add_op(OpType::SWAP, {x, y}); - circ.add_op(OpType::X, {x}); - circ.add_op(OpType::H, {x}); - circ.add_op(OpType::SWAP, {y + 1, y}); - } else { - circ.add_op(OpType::SWAP, {y, x}); - circ.add_op(OpType::H, {y}); - circ.add_op(OpType::X, {y}); - circ.add_op(OpType::SWAP, {y, y + 1}); - } - } - } - SquareGrid arc(3, 3); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - - const std::vector gate_errors{ - 0.3, 0.2, 0.1, 0.02, 0.22, 0.46, 0.18, 1.0 - 0.907, 1.0 - 0.7241}; - REQUIRE(arc.get_columns() * arc.get_rows() == gate_errors.size()); - REQUIRE(gate_errors.size() == square_nodes.size()); - REQUIRE(circ.n_qubits() == gate_errors.size()); - op_node_errors_t nec; - unsigned ind = 0; - for (unsigned 
nn = 0; nn < square_nodes.size(); ++nn) { - nec[square_nodes[nn]] = op_errors_t( - {{OpType::H, gate_errors[nn]}, - {OpType::X, gate_errors[(nn + 3) % gate_errors.size()]}}); - } - DeviceCharacterisation characterisation(nec); - - Circuit test_0 = circ; - reassign_boundary(test_0, square_nodes); - Transform::decompose_SWAP_to_CX().apply(test_0); - const auto sv0 = tket_sim::get_statevector(test_0); - double pre_aggregate = 0; - - qubit_bimap_t qmap; - qubit_vector_t free_qs = test_0.all_qubits(); - for (unsigned u = 0; u < free_qs.size(); u++) { - qmap.insert({free_qs[u], square_nodes[u]}); - } - - for (Command com : test_0) { - OpType ot = com.get_op_ptr()->get_type(); - if (ot == OpType::X || ot == OpType::H) { - Node n = qmap.left.at(Qubit(com.get_args()[0])); - pre_aggregate += 1.0 - characterisation.get_error(Node(n), ot); - } - } - reassign_boundary(circ, square_nodes); - Transform::commute_SQ_gates_through_SWAPS(nec).apply(circ); - Circuit test_1 = circ; - Transform::decompose_SWAP_to_CX().apply(test_1); - const auto sv1 = tket_sim::get_statevector(test_1); - double post_aggregate = 0; - for (Command com : test_1) { - OpType ot = com.get_op_ptr()->get_type(); - if (ot == OpType::X || ot == OpType::H) { - Node n = qmap.left.at(Qubit(com.get_args()[0])); - post_aggregate += 1.0 - characterisation.get_error(Node(n), ot); - } - } - REQUIRE(tket_sim::compare_statevectors_or_unitaries(sv0, sv1)); - REQUIRE(post_aggregate > pre_aggregate); - } -} -SCENARIO("Test barrier is ignored by routing") { - GIVEN("Circuit with 1qb barrier") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.3, {0}); - circ.add_barrier(uvec{0}); - circ.add_op(OpType::CX, {1, 2}); - SquareGrid test_architecture(1, 3); - GraphPlacement gp(test_architecture); - gp.place(circ); - Routing router(circ, test_architecture); - Circuit pc = router.solve().first; - REQUIRE(pc.depth() == 2); - check_command_types( - pc, {OpType::CX, OpType::Rz, OpType::CX, OpType::Barrier}); - } - GIVEN("Circuit with 2 qb barrier") { - Circuit circ(2); - circ.add_op(OpType::CX, {1, 0}); - circ.add_barrier({0, 1}); - circ.add_op(OpType::CX, {0, 1}); - SquareGrid test_architecture(1, 2); - Routing router(circ, test_architecture); - check_command_types( - router.solve().first, {OpType::CX, OpType::Barrier, OpType::CX}); - } - GIVEN("Circuit with 4 qb barrier, using gen_full_mapping_pass.") { - const std::vector nums = { - Node("rig", 21), Node("rig", 22), Node("rig", 25), Node("rig", 35), - Node("rig", 36)}; - const std::vector> coupling_list_indices = { - {0, 1}, {0, 4}, {1, 0}, {1, 3}, {4, 0}, {4, 3}, {3, 1}, {3, 4}}; - - std::vector> coupling_list; - for (const auto &pair : coupling_list_indices) { - coupling_list.push_back( - std::make_pair(nums[pair.first], nums[pair.second])); - } - Architecture arc(coupling_list); - Circuit circ(4); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - add_2qb_gates(circ, OpType::CZ, {{1, 2}, {3, 2}, {3, 1}}); - circ.add_barrier({0, 1, 2, 3}); - PlacementPtr pp = std::make_shared(arc); - RoutingMethodPtr rmp = std::make_shared(100); - PassPtr p = gen_full_mapping_pass(arc, pp, {rmp}); - CompilationUnit cu(circ); - p->apply(cu); - REQUIRE( - respects_connectivity_constraints(cu.get_circ_ref(), arc, false, true)); - } - GIVEN("Check Circuit with 2qb barrier does not add swaps for the barrier") { - Circuit circ(3); - Architecture line({{0, 1}, {1, 2}}); - GraphPlacement gp(line); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - circ.add_barrier({0, 2}); - 
add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - gp.place(circ); - - Routing router(circ, line); - qubit_vector_t all_qs_pre = circ.all_qubits(); - std::pair pc = router.solve(); - qubit_vector_t all_qs_post = circ.all_qubits(); - REQUIRE(all_qs_pre == all_qs_post); - REQUIRE(pc.second == false); - REQUIRE(pc.first.depth() == 4); - } - GIVEN( - "Check Circuit with 2qb barrier does not add swaps for the barrier, " - "with no placement.") { - Circuit circ(3); - Architecture line({{0, 1}, {1, 2}}); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - circ.add_barrier({0, 2}); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - GraphPlacement gp(line); - gp.place(circ); - Routing router(circ, line); - unsigned pre_depth = circ.depth(); - std::pair pc = router.solve({}); - REQUIRE(pc.second == false); - unsigned post_depth = pc.first.depth(); - REQUIRE(post_depth == pre_depth); - REQUIRE(post_depth == 4); - } - GIVEN("Circuit with 3 qb barrier") { - Circuit circ(3); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - circ.add_barrier({0, 1, 2}); - Architecture line({{0, 1}, {1, 2}}); - GraphPlacement gp(line); - gp.place(circ); - Routing router(circ, line); - qubit_vector_t all_qs_pre = circ.all_qubits(); - std::pair pc = router.solve(); - qubit_vector_t all_qs_post = circ.all_qubits(); - REQUIRE(all_qs_pre == all_qs_post); - REQUIRE(pc.first.depth() == 2); - REQUIRE(pc.second == false); - } -} - -SCENARIO( - "Does identification and insertion of bridge circuit do as expected?") { - GIVEN( - "A proposed SWAP which will act detrimentally for the next timestep, " - "i.e. a bridge should be inserted.") { - Circuit circ(9); - for (unsigned i = 0; i < 9; i++) { - circ.add_op(OpType::H, {i}); - } - add_2qb_gates(circ, OpType::CX, {{0, 4}, {3, 8}, {4, 7}, {3, 6}}); - - SquareGrid arc(3, 3); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - Routing router(circ, arc); - RoutingTester test_router(&router); - RoutingConfig new_config(50, 0, 0, 0); - test_router.set_config(new_config); - qubit_bimap_t qmap = test_router.set_default_initial_map(square_nodes); - test_router.initialise_slicefrontier(); - test_router.set_interaction(); - std::pair, std::pair> output = - test_router.check_distributed_cx({square_nodes[1], square_nodes[4]}); - std::pair, std::pair> expected = { - {false, Node(0)}, {false, Node(0)}}; - REQUIRE(output == expected); - } - GIVEN( - "A proposed SWAP which will act better for the next timestep, i.e. 
a " - "bridge should not be inserted.") { - Circuit circ(9); - for (unsigned i = 0; i < 9; i++) { - circ.add_op(OpType::H, {i}); - } - add_2qb_gates(circ, OpType::CX, {{0, 4}, {3, 8}, {4, 7}, {3, 6}}); - - SquareGrid arc(3, 3); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - Routing router(circ, arc); - RoutingTester test_router(&router); - qubit_bimap_t qmap = test_router.set_default_initial_map(square_nodes); - RoutingConfig new_config(50, 0, 0, 0); - test_router.set_config(new_config); - test_router.initialise_slicefrontier(); - test_router.set_interaction(); - std::pair, std::pair> output = - test_router.check_distributed_cx({square_nodes[3], square_nodes[4]}); - std::pair, std::pair> expected{ - {false, Node(0)}, {false, Node(0)}}; - REQUIRE(output == expected); - } - GIVEN("Multiple bridges to be inserted.") { - Circuit circ(6); - SquareGrid arc(6, 1); - add_2qb_gates(circ, OpType::CX, {{0, 2}, {3, 5}, {1, 3}}); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - Routing router(circ, arc); - RoutingTester test_router(&router); - qubit_bimap_t qmap = test_router.set_default_initial_map(square_nodes); - test_router.initialise_slicefrontier(); - test_router.set_interaction(); - test_router.add_distributed_cx( - square_nodes[3], square_nodes[5], square_nodes[4]); - test_router.add_distributed_cx( - square_nodes[0], square_nodes[2], square_nodes[1]); - REQUIRE(test_router.get_circ()->n_gates() == 3); - test_router.advance_frontier(); - } - GIVEN("Consecutive CX edge case") { - Circuit circ(5); - Architecture arc({{0, 1}, {1, 2}, {0, 3}, {1, 4}, {3, 4}}); - add_2qb_gates(circ, OpType::CX, {{1, 2}, {0, 2}, {0, 1}}); - Routing router(circ, arc); - RoutingTester test_router(&router); - qubit_bimap_t qmap = test_router.set_default_initial_map(); - test_router.initialise_slicefrontier(); - test_router.advance_frontier(); - test_router.set_interaction(); - test_router.add_distributed_cx(Node(0), Node(2), Node(1)); - test_router.advance_frontier(); - } -} - -SCENARIO( - "Do Placement and Routing work if the given graph perfectly solves the " - "problem?") { - GIVEN("A perfect example without clifford_simp") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{1, 2}, - {0, 3}, - {1, 4}, - {1, 2}, - {0, 1}, - {2, 0}, - {2, 1}, - {0, 1}, - {2, 0}, - {1, 4}, - {1, 3}, - {1, 0}}); - - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - Placement pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - std::pair out_circ = router.solve(default_config); - REQUIRE( - respects_connectivity_constraints(out_circ.first, arc, false) == true); - REQUIRE(out_circ.second); - } - GIVEN( - "The circuit left after clifford_simp, without clifford simp " - "applied") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - Placement 
pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - std::pair out_circ = router.solve(default_config); - REQUIRE( - respects_connectivity_constraints(out_circ.first, arc, false) == true); - REQUIRE(out_circ.second); - } - GIVEN( - "A smaller circuit that once had a segmentation fault when iterating " - "through commands after clifford_simp() is applied and routing " - "completed.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{1, 2}, {0, 3}, {1, 4}, {0, 1}, {2, 0}, {0, 1}, {1, 0}}); - Transform::clifford_simp().apply(circ); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - - Placement pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - Circuit out_circ = router.solve(default_config).first; - REQUIRE(respects_connectivity_constraints(out_circ, arc, false) == true); - } - GIVEN("The circuit that dies with clifford_simp") { - Circuit circ(5); - add_2qb_gates(circ, OpType::CX, {{0, 3}, {1, 4}, {1, 0}, {2, 1}}); - circ.add_op(OpType::SWAP, {3, 4}); - circ.add_op(OpType::Z, {4}); - circ.replace_SWAPs(); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - GraphPlacement pl(arc); - qubit_mapping_t pl_map = pl.get_placement_map(circ); - pl.place(circ); - Routing router(circ, arc); - Circuit out_circ = router.solve(default_config).first; - qubit_mapping_t map = router.return_final_map(); - Vertex x = out_circ.add_op(OpType::X, {map.at(pl_map.at(Qubit(4)))}); - Vertex pred = out_circ.get_predecessors(x).front(); - REQUIRE(out_circ.get_OpType_from_Vertex(pred) == OpType::Z); - REQUIRE(NoWireSwapsPredicate().verify(out_circ)); - REQUIRE(respects_connectivity_constraints(out_circ, arc, false) == true); - } -} - -SCENARIO( - "Does the decompose_BRIDGE_gates function correctly decompose the " - "BRIDGE Op, and pick the correct decomposition given the structure of " - "surrounding CX gates?") { - GIVEN("A single BRIDGE gate to be decomposed.") { - Architecture test_arc({{0, 1}, {1, 2}}); - Circuit test_pc(3); - test_pc.add_op(OpType::BRIDGE, {0, 1, 2}); - Transform::decompose_BRIDGE_to_CX().apply(test_pc); - auto it = test_pc.begin(); - unit_vector_t opt1 = {Qubit(0), Qubit(1)}; - unit_vector_t opt2 = {Qubit(1), Qubit(2)}; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt2); - ++it; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt1); - ++it; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt2); - ++it; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt1); - } - GIVEN("MultpleBRIDGE gate to be decomposed.") { - Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}}); - Circuit test_circuit(6); - test_circuit.add_op(OpType::BRIDGE, {0, 1, 2}); - test_circuit.add_op(OpType::BRIDGE, {1, 2, 3}); - test_circuit.add_op(OpType::BRIDGE, {2, 1, 0}); - test_circuit.add_op(OpType::BRIDGE, {2, 3, 4}); - test_circuit.add_op(OpType::BRIDGE, {3, 4, 5}); - Circuit test_pc(test_circuit); - Transform::decompose_BRIDGE_to_CX().apply(test_pc); - REQUIRE(test_pc.n_gates() == 20); - } -} - -SCENARIO("Does the rerouting of a solved circuit return 'false'?") { - 
GIVEN("A simple circuit using default solve.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - Routing router(circ, arc); - std::pair out_circ = router.solve(); - REQUIRE(out_circ.second == true); - Routing router2(out_circ.first, arc); - std::pair test_out2 = router2.solve(); - REQUIRE(test_out2.second == false); - Routing router3(test_out2.first, arc); - std::pair test_out3 = router3.solve(); - REQUIRE(test_out3.second == false); - } - GIVEN("A simple circuit, but using a custom map for finding a solution.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - Placement pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - std::pair out_circ = router.solve(default_config); - REQUIRE( - respects_connectivity_constraints(out_circ.first, arc, false) == true); - REQUIRE(out_circ.second); - - // Now try repeating it, making sure returned bool changes - // make a LinePlacement plaer for the architecture - LinePlacement lp_d(arc); - - Circuit c0 = out_circ.first; - qubit_mapping_t m_0 = lp_d.get_placement_map(c0); - lp_d.place_with_map(c0, m_0); - Routing router2(c0, arc); - std::pair test_out2 = router2.solve(); - - Circuit c1 = test_out2.first; - REQUIRE(test_out2.second == true); - Routing router3(c1, arc); - qubit_vector_t pre_c1 = c1.all_qubits(); - std::pair test_out3 = router3.solve(); - qubit_vector_t post_c1 = test_out3.first.all_qubits(); - REQUIRE(test_out3.second == false); - Circuit c2 = test_out3.first; - Routing router4(c2, arc); - std::pair test_out4 = router4.solve(); - REQUIRE(test_out4.second == false); - } -} -SCENARIO("Routing on architecture with non-contiguous qubit labels") { - GIVEN("A 2-qubit architecture with a gap") { - Architecture arc(std::vector>{{0, 2}}); - PassPtr pass = gen_default_mapping_pass(arc); - Circuit circ(2); - CompilationUnit cu(circ); - pass->apply(cu); - } - GIVEN("A 2-qubit architecture with a gap and some two-qubit gates") { - Architecture arc(std::vector>{{0, 2}}); - PassPtr pass = gen_default_mapping_pass(arc); - Circuit circ(2); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CZ, {1, 0}); - circ.add_op(OpType::SWAP, {0, 1}); - CompilationUnit cu(circ); - pass->apply(cu); - } -} - -SCENARIO("Routing of aas example") { - GIVEN("aas routing - simple example") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - 
circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example II") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example III") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example IV") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - 
circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example V") { - Architecture arc(std::vector{{Node(0), Node(1)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example VI") { - Architecture arc(std::vector{{Node(0), Node(2)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - - const auto s = tket_sim::get_unitary(circ); - const auto s1 = tket_sim::get_unitary(result); - REQUIRE(tket_sim::compare_statevectors_or_unitaries( - s, s1, tket_sim::MatrixEquivalence::EQUAL)); - } - GIVEN("aas routing - simple example VII") { - Architecture arc(std::vector{ - {Node(0), Node(2)}, {Node(2), Node(4)}, {Node(4), Node(6)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - - const auto s = tket_sim::get_unitary(circ); - const auto s1 = tket_sim::get_unitary(result); - REQUIRE(tket_sim::compare_statevectors_or_unitaries( - s, s1, tket_sim::MatrixEquivalence::EQUAL)); - } - GIVEN("aas routing - simple example VIII") { - Architecture arc(std::vector{ - {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - 
circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example IX, other gate set") { - Architecture arc(std::vector{ - {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::X, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::X, {3}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing with measure") { - Architecture arc(std::vector{{Node(0), Node(2)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(2, 2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - for (unsigned mes = 0; mes < 2; ++mes) { - circ.add_measure(mes, mes); - } - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - } - GIVEN("aas routing - circuit with fewer qubits then nodes in the arch") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(3); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.2, {1}); - circ.add_op(OpType::Rz, 0.3, {2}); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - circuit with fewer qubits then nodes in the arch II") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, - {Node(1), Node(2)}, - {Node(2), Node(3)}, - {Node(3), Node(4)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(3); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.2, {1}); - circ.add_op(OpType::Rz, 0.3, {2}); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } -} - -SCENARIO("Routing preserves the number of qubits") { - std::vector> cons; - cons.push_back({Node("x", 1), Node("x", 0)}); - cons.push_back({Node("x", 2), Node("x", 1)}); - Architecture arc( - std::vector>(cons.begin(), cons.end())); 
- PassPtr pass = gen_default_mapping_pass(arc); - Circuit c(3); - c.add_op(OpType::CnX, {2, 1}); - CompilationUnit cu(c); - bool applied = pass->apply(cu); - const Circuit &c1 = cu.get_circ_ref(); - REQUIRE(c.n_qubits() == c1.n_qubits()); -} - -SCENARIO( - "Methods related to correct routing and decomposition of circuits with " - "classical wires.") { - GIVEN( - "A circuit with classical wires on CX gates. No Bridge gate " - "allowed.") { - Architecture test_arc({{0, 1}, {1, 2}}); - Circuit circ(3, 2); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0, 1}, 0); - circ.add_conditional_gate(OpType::CX, {}, {2, 1}, {0, 1}, 1); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0, 1}, 2); - circ.add_conditional_gate(OpType::CX, {}, {2, 1}, {1, 0}, 3); - circ.add_conditional_gate(OpType::CX, {}, {0, 2}, {0, 1}, 0); - Routing test_router(circ, test_arc); - std::pair output = test_router.solve({50, 0, 0, 0}); - Transform::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints( - output.first, test_arc, false, false)); - Transform::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints( - output.first, test_arc, false, false)); - } - GIVEN( - "A circuit that requires modification to satisfy architecture " - "constraints.") { - Architecture sg({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); - Circuit circ(5, 1); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0}, 1); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}, {1, 3}, {1, 4}, {0, 1}}); - Routing test_router(circ, sg); - std::pair output = test_router.solve({50, 0, 0, 0}); - Transform::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, sg, false, false)); - Transform::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, sg, false, false)); - Command classical_com = output.first.get_commands()[0]; - REQUIRE(classical_com.get_args()[0] == output.first.all_bits()[0]); - } - GIVEN("A single Bridge gate with multiple classical wires, decomposed.") { - Architecture arc({{0, 1}, {1, 2}}); - Circuit circ(3, 3); - circ.add_conditional_gate( - OpType::BRIDGE, {}, {0, 1, 2}, {0, 1, 2}, 1); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - Transform::decompose_BRIDGE_to_CX().apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - for (Command com : circ.get_commands()) { - REQUIRE(com.get_args()[0] == circ.all_bits()[0]); - REQUIRE(com.get_args()[1] == circ.all_bits()[1]); - REQUIRE(com.get_args()[2] == circ.all_bits()[2]); - } - } - GIVEN("A directed architecture, a single CX gate that requires flipping.") { - Architecture arc(std::vector>{{0, 1}}); - Circuit circ(2, 2); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {1, 0}, 0); - circ.add_conditional_gate(OpType::CX, {}, {1, 0}, {0, 1}, 1); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, false)); - REQUIRE(!respects_connectivity_constraints(circ, arc, true, false)); - Transform::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true, false)); - std::vector all_coms = circ.get_commands(); - REQUIRE(all_coms[0].get_args()[0] == circ.all_bits()[1]); - REQUIRE(all_coms[0].get_args()[1] == circ.all_bits()[0]); - REQUIRE(all_coms[1].get_args()[0] == circ.all_bits()[0]); - REQUIRE(all_coms[1].get_args()[1] == 
circ.all_bits()[1]); - } - GIVEN( - "A large circuit, with a mixture of conditional CX and CZ with " - "multiple classical wires, non conditional CX and CZ, and single " - "qubit gates.") { - SquareGrid arc(5, 10); - Circuit circ(50, 10); - for (unsigned i = 0; i < 48; i++) { - circ.add_op(OpType::CX, {i, i + 1}); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); - circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); - circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); - circ.add_conditional_gate( - OpType::CZ, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); - circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); - } - Routing router(circ, arc); - std::pair output = router.solve(); - Transform::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - Transform::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - } - GIVEN( - "A large circuit, with a mixture of conditional CX and CX gates with " - "multiple classical wires, non conditional CX and, single qubit " - "gates, and a directed architecture.") { - SquareGrid arc(10, 4, 2); - Circuit circ(60, 10); - for (unsigned i = 0; i < 58; i++) { - circ.add_op(OpType::CX, {i, i + 1}); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); - circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); - circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); - circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); - } - Routing router(circ, arc); - std::pair output = router.solve(); - Transform::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - Transform::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - Transform::decompose_CX_directed(arc).apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, true, true)); - } -} -SCENARIO( - "Does copying decompose_SWAP_to_CX pass and applying it to a routed " - "Circuit work correctly?") { - GIVEN("A simple circuit and architecture.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - Routing router(circ, arc); - Circuit c = router.solve().first; - Transform T_1 = Transform::decompose_SWAP_to_CX(); - T_1.apply(c); - REQUIRE(c.count_gates(OpType::SWAP) == 0); - } -} - -SCENARIO("Does add_distributed_cx account for incorrect BRIDGE nodes?") { - GIVEN("An incorrect and a correct BRIDGE orientation.") { - Architecture a({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}}); - Circuit c(6); - c.add_op(OpType::CX, {3, 5}); - c.add_op(OpType::CX, {2, 0}); - - Placement placer(a); - qubit_vector_t c_qubits = c.all_qubits(); - node_vector_t a_nodes = a.get_all_nodes_vec(); - - qubit_mapping_t initial_map = { - {c_qubits[0], a_nodes[0]}, {c_qubits[1], a_nodes[1]}, - {c_qubits[2], a_nodes[2]}, {c_qubits[3], a_nodes[3]}, - {c_qubits[4], a_nodes[4]}, {c_qubits[5], a_nodes[5]}}; - - placer.place_with_map(c, initial_map); - - 
Routing r(c, a); - RoutingTester rt(&r); - - rt.initialise_slicefrontier(); - qubit_bimap_t qbm; - for (unsigned nn = 0; nn <= 5; ++nn) { - qbm.insert({a_nodes[nn], Node(nn)}); - } - - rt.set_qmap(qbm); - - rt.add_distributed_cx(Node(5), Node(3), Node(4)); - rt.add_distributed_cx(Node(2), Node(0), Node(1)); - - std::vector bridge_commands = rt.get_circ()->get_commands(); - qubit_vector_t com_0_qubits = {a_nodes[2], a_nodes[1], a_nodes[0]}; - qubit_vector_t com_1_qubits = {a_nodes[3], a_nodes[4], a_nodes[5]}; - REQUIRE(bridge_commands[0].get_qubits() == com_0_qubits); - REQUIRE(bridge_commands[1].get_qubits() == com_1_qubits); - } - GIVEN("An invalid BRIDGE.") { - Architecture a({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}}); - Circuit c(6); - c.add_op(OpType::CX, {2, 5}); - c.add_op(OpType::CX, {0, 1}); - - Placement placer(a); - qubit_vector_t c_qubits = c.all_qubits(); - node_vector_t a_nodes = a.get_all_nodes_vec(); - - qubit_mapping_t initial_map = { - {c_qubits[0], a_nodes[0]}, {c_qubits[1], a_nodes[1]}, - {c_qubits[2], a_nodes[2]}, {c_qubits[3], a_nodes[3]}, - {c_qubits[4], a_nodes[4]}, {c_qubits[5], a_nodes[5]}}; - - placer.place_with_map(c, initial_map); - - Routing r(c, a); - RoutingTester rt(&r); - - rt.initialise_slicefrontier(); - qubit_bimap_t qbm; - for (unsigned nn = 0; nn <= 5; ++nn) { - qbm.insert({a_nodes[nn], Node(nn)}); - } - - rt.set_qmap(qbm); - - REQUIRE_THROWS_AS( - rt.add_distributed_cx(Node(2), Node(4), Node(5)), BridgeInvalid); - REQUIRE_THROWS_AS( - rt.add_distributed_cx(Node(0), Node(1), Node(3)), BridgeInvalid); - REQUIRE_THROWS_AS( - rt.add_distributed_cx(Node(0), Node(1), Node(2)), BridgeInvalid); - } -} - -} // namespace test_Routing -} // namespace tket diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index f76fbf61b3..802d1035f0 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -346,14 +346,6 @@ SCENARIO("Test Circuit serialization") { } SCENARIO("Test config serializations") { - GIVEN("RoutingConfig") { - RoutingConfig orig(20, 6, 3, 2.5); - nlohmann::json j_config = orig; - RoutingConfig loaded = j_config.get(); - REQUIRE(orig == loaded); - nlohmann::json j_loaded = loaded; - REQUIRE(j_config == j_loaded); - } GIVEN("PlacementConfig") { PlacementConfig orig(5, 20, 100000, 10, 1); nlohmann::json j_config = orig; diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index dbf6ea2474..195b0b501b 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -87,7 +87,6 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_PauliGraph.cpp ${TKET_TESTS_DIR}/test_Architectures.cpp ${TKET_TESTS_DIR}/test_Placement.cpp - ${TKET_TESTS_DIR}/test_Routing.cpp ${TKET_TESTS_DIR}/test_MappingFrontier.cpp ${TKET_TESTS_DIR}/test_RoutingMethod.cpp ${TKET_TESTS_DIR}/test_MappingManager.cpp From d46a49c27d9e0d56b645914be9f14a1e546acab3 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 14 Jan 2022 16:12:07 +0000 Subject: [PATCH 010/146] Improve LexiRoute.cpp coverage --- tket/src/Mapping/LexiRoute.cpp | 34 ++++---- tket/tests/test_LexiRoute.cpp | 145 ++++++++++++++++++++++++++++++++- 2 files changed, 158 insertions(+), 21 deletions(-) diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index 8dc66e1b5f..dfccd3b056 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -213,10 +213,7 @@ swap_set_t LexiRoute::get_candidate_swaps() { Node assigned_first = Node(this->labelling_[interaction.first]); std::vector adjacent_uids_0 = 
this->architecture_->nodes_at_distance(assigned_first, 1); - if (adjacent_uids_0.size() == 0) { - throw LexiRouteError( - assigned_first.repr() + " has no adjacent Node in Architecture."); - } + TKET_ASSERT(adjacent_uids_0.size() != 0); for (const Node& neighbour : adjacent_uids_0) { if (candidate_swaps.find({neighbour, assigned_first}) == candidate_swaps.end()) { @@ -226,10 +223,7 @@ swap_set_t LexiRoute::get_candidate_swaps() { Node assigned_second = Node(this->labelling_[interaction.second]); std::vector adjacent_uids_1 = this->architecture_->nodes_at_distance(assigned_second, 1); - if (adjacent_uids_1.size() == 0) { - throw LexiRouteError( - assigned_first.repr() + " has no adjacent Node in Architecture."); - } + TKET_ASSERT(adjacent_uids_1.size() != 0); for (const Node& neighbour : adjacent_uids_1) { if (candidate_swaps.find({neighbour, assigned_second}) == candidate_swaps.end()) { @@ -333,13 +327,11 @@ std::pair LexiRoute::check_bridge( const std::pair LexiRoute::pair_distances( const Node& p0_first, const Node& p0_second, const Node& p1_first, const Node& p1_second) const { - if (!this->architecture_->node_exists(p0_first) || - !this->architecture_->node_exists(p0_second) || - !this->architecture_->node_exists(p1_first) || - !this->architecture_->node_exists(p1_second)) { - throw LexiRouteError( - "Node passed to LexiRoute::pair_distances not in architecture."); - } + TKET_ASSERT( + this->architecture_->node_exists(p0_first) && + this->architecture_->node_exists(p0_second) && + this->architecture_->node_exists(p1_first) && + this->architecture_->node_exists(p1_second)); size_t curr_dist1 = this->architecture_->get_distance(p0_first, p0_second); size_t curr_dist2 = this->architecture_->get_distance(p1_first, p1_second); return (curr_dist1 > curr_dist2) ? 
std::make_pair(curr_dist1, curr_dist2) @@ -351,23 +343,29 @@ void LexiRoute::remove_swaps_decreasing(swap_set_t& swaps) { Node pair_first, pair_second; for (const auto& swap : swaps) { auto it = this->interacting_uids_.find(swap.first); + // => swap.first is in interaction if (it != this->interacting_uids_.end()) { + // find its pair pair_first = Node(it->second); } else { + // => not interacting, assign pair to self (will give lexicographic + // distance 0) pair_first = swap.first; } + // => UnitID in SWAP are interacting if (pair_first == swap.second) { continue; } auto jt = this->interacting_uids_.find(swap.second); + // => swap.second is in interaction if (jt != this->interacting_uids_.end()) { pair_second = Node(jt->second); } else { pair_second = swap.second; } - if (pair_second == swap.first) { - continue; - } + // => UnitID in SWAP are interacting + // Check should alrady be done with earlier continue + TKET_ASSERT(pair_second != swap.first); const std::pair& curr_dists = this->pair_distances(swap.first, pair_first, swap.second, pair_second); diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index 1fc08d98c0..d4642dedea 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -208,7 +208,50 @@ SCENARIO("Test LexiRoute::solve") { lr.solve(4); REQUIRE(mf->circuit_.get_commands().size() == 4); } - GIVEN("Ancilla assignment and then merge preferred.") { + GIVEN("Bridge preferred, conditional CX.") { + Circuit circ(5, 1); + std::vector qubits = circ.all_qubits(); + circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0}, 1); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + {qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + REQUIRE(mf->circuit_.get_commands().size() == 4); + } + GIVEN("Bridge preferred, conditional CZ.") { + Circuit circ(5, 1); + std::vector qubits = circ.all_qubits(); + circ.add_conditional_gate(OpType::CZ, {}, {0, 1}, {0}, 1); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + {qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + REQUIRE(mf->circuit_.get_commands().size() == 4); + } + + GIVEN("Ancilla assignment and then merge preferred, one valid node.") { Circuit circ(3); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); @@ -237,9 +280,41 @@ SCENARIO("Test LexiRoute::solve") { LexiRoute lr0(shared_arc, mf); lr0.solve(20); + REQUIRE(circ.all_qubits()[1] == nodes[4]); + } + + GIVEN("Ancilla assignment and then merge preferred, multiple valid Node.") { + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + std::vector nodes = {Node("test_node", 0), Node("test_node", 1), + Node("test_node", 2), Node("node_test", 3), + Node("node_test", 4), Node("node_test", 5)}; + // A ring, but with two identical length paths where ancilla could 
be + // assigned + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[2], nodes[5]}, + {nodes[3], nodes[4]}, + {nodes[5], nodes[4]}, + {nodes[4], nodes[0]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + std::map rename_map = { + {qubits[0], nodes[2]}, {qubits[1], nodes[4]}}; + circ.rename_units(rename_map); + + std::shared_ptr mf = + std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); - LexiRoute lr1(shared_arc, mf); - lr1.solve(20); + LexiRoute lr0(shared_arc, mf); + lr0.solve(20); + + REQUIRE(circ.all_qubits()[1] == nodes[5]); } GIVEN( @@ -274,7 +349,71 @@ SCENARIO("Test LexiRoute::solve") { REQUIRE(swap_c.get_args() == uids); REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } + GIVEN( + "Labelling is required, but there are no free remaining qubits, for one " + "updated label, order 0.") { + Circuit circ(9); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[1], qubits[8]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + } + GIVEN( + "Labelling is required, but there are no free remaining qubits, for one " + "updated label, order 0.") { + Circuit circ(9); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[8], qubits[1]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + } + GIVEN( + "Labelling is required, but there are no free remaining qubits, for two " + "updated labels.") { + Circuit circ(10); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[9], qubits[8]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + } } + SCENARIO("Test LexiRouteRoutingMethod") { std::vector nodes = { Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), From 1f715977f2f11ca60a8ec2127ad5d7e1076708a5 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 14 Jan 2022 16:39:19 +0000 Subject: [PATCH 011/146] Improve MappingFrontier.cpp test coverage --- tket/src/Mapping/MappingFrontier.cpp | 5 +- tket/tests/test_MappingFrontier.cpp | 84 ++++++++++++++++++++++++++-- 2 files changed, 80 insertions(+), 9 deletions(-) diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 813fc2d07d..aa130e4c54 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -256,9 +256,7 @@ 
Subcircuit MappingFrontier::get_frontier_subcircuit( subcircuit_vertices.insert( current_cut.slice->begin(), current_cut.slice->end()); } - if (subcircuit_vertices.size() == 0) { - throw MappingFrontierError("Subcircuit being produced with no gates."); - } + TKET_ASSERT(subcircuit_vertices.size() != 0); return Subcircuit( convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( this->circuit_, this->quantum_boundary)), @@ -266,7 +264,6 @@ Subcircuit MappingFrontier::get_frontier_subcircuit( subcircuit_vertices); } -// TODO: Update to support ancillas void MappingFrontier::update_quantum_boundary_uids( const unit_map_t& relabelled_uids) { for (const std::pair& label : relabelled_uids) { diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp index 6676a8b104..fb65ce89e4 100644 --- a/tket/tests/test_MappingFrontier.cpp +++ b/tket/tests/test_MappingFrontier.cpp @@ -243,6 +243,80 @@ SCENARIO("Test update_quantum_boundary_uids.") { } SCENARIO("Test permute_subcircuit_q_out_hole.") { + GIVEN("Quantum Boundary and Permutation have size mismatch.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + circ.add_op(OpType::X, {q0}); + circ.add_op(OpType::CX, {q0, q1}); + circ.add_op(OpType::CY, {q2, q3}); + circ.add_op(OpType::CZ, {q0, q2}); + circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + unit_map_t permutation = {{nodes[0], nodes[1]}}; + + REQUIRE_THROWS_AS( + mf.permute_subcircuit_q_out_hole(permutation, sc), + MappingFrontierError); + } + GIVEN( + "Quantum Boundary and permutation have same size, but UnitID don't " + "match.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + circ.add_op(OpType::X, {q0}); + circ.add_op(OpType::CX, {q0, q1}); + circ.add_op(OpType::CY, {q2, q3}); + circ.add_op(OpType::CZ, {q0, q2}); + circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + unit_map_t permutation = { + {nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {Node(4), nodes[0]}}; + + REQUIRE_THROWS_AS( + mf.permute_subcircuit_q_out_hole(permutation, sc), + MappingFrontierError); + } GIVEN("A four qubit subcircuit where every qubit is permuted by given map.") { Circuit circ(0); circ.add_q_register("test_nodes", 4); @@ -251,11 +325,11 @@ SCENARIO("Test permute_subcircuit_q_out_hole.") { Qubit q2("test_nodes", 2); Qubit q3("test_nodes", 3); - Vertex v1 = circ.add_op(OpType::X, {q0}); - Vertex v2 = 
circ.add_op(OpType::CX, {q0, q1}); - Vertex v3 = circ.add_op(OpType::CY, {q2, q3}); - Vertex v5 = circ.add_op(OpType::CZ, {q0, q2}); - Vertex v7 = circ.add_op(OpType::CX, {q3, q1}); + circ.add_op(OpType::X, {q0}); + circ.add_op(OpType::CX, {q0, q1}); + circ.add_op(OpType::CY, {q2, q3}); + circ.add_op(OpType::CZ, {q0, q2}); + circ.add_op(OpType::CX, {q3, q1}); std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; From b31c2c0d3712e9da8ed84df8b36193327025e354 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 17 Jan 2022 09:56:16 +0000 Subject: [PATCH 012/146] Rmoeve decmopose_module method --- pytket/binders/passes.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index 63d41cd22b..943f971b88 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -73,12 +73,6 @@ static PassPtr gen_default_aas_routing_pass( return gen_full_mapping_pass_phase_poly(arc, lookahead, cnotsynthtype); } -static const py::module &decompose_module() { - static const py::module decomposer_ = - py::module::import("pytket.circuit.decompose_classical"); - return decomposer_; -} - const PassPtr &DecomposeClassicalExp() { // a special box decomposer for Circuits containing // ClassicalExpBox From b18a8dbd13434b4442950b9455f12e653d2e9568 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 17 Jan 2022 11:37:40 +0000 Subject: [PATCH 013/146] readd missing test --- tket/tests/test_LexiRoute.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index d4642dedea..7c6e7f1f11 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -314,6 +314,9 @@ SCENARIO("Test LexiRoute::solve") { LexiRoute lr0(shared_arc, mf); lr0.solve(20); + LexiRoute lr1(shared_arc, mf); + lr1.solve(20); + REQUIRE(circ.all_qubits()[1] == nodes[5]); } From 0941f7405de52db194720deea2688440a10a6e26 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 17 Jan 2022 17:07:16 +0000 Subject: [PATCH 014/146] Update LexiRoute test coverage --- tket/tests/test_LexiRoute.cpp | 55 ++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index 7c6e7f1f11..1b24da5da9 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -251,7 +251,7 @@ SCENARIO("Test LexiRoute::solve") { REQUIRE(mf->circuit_.get_commands().size() == 4); } - GIVEN("Ancilla assignment and then merge preferred, one valid node.") { + GIVEN("Ancilla assignment, one valid node.") { Circuit circ(3); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); @@ -279,11 +279,15 @@ SCENARIO("Test LexiRoute::solve") { mf->advance_frontier_boundary(shared_arc); LexiRoute lr0(shared_arc, mf); lr0.solve(20); - REQUIRE(circ.all_qubits()[1] == nodes[4]); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr1(shared_arc, mf); + lr1.solve(20); + REQUIRE(circ.all_qubits()[0] == nodes[3]); } - GIVEN("Ancilla assignment and then merge preferred, multiple valid Node.") { + GIVEN("Ancilla assignment, multiple valid Node.") { Circuit circ(3); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); @@ -291,7 +295,8 @@ SCENARIO("Test LexiRoute::solve") { std::vector nodes = {Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), Node("node_test", 3), - Node("node_test", 4), Node("node_test", 5)}; + Node("node_test", 4), Node("node_test", 5), + 
Node("node_test", 6)}; // A ring, but with two identical length paths where ancilla could be // assigned Architecture architecture( @@ -299,6 +304,8 @@ SCENARIO("Test LexiRoute::solve") { {nodes[1], nodes[2]}, {nodes[2], nodes[3]}, {nodes[2], nodes[5]}, + {nodes[3], nodes[6]}, + {nodes[5], nodes[6]}, {nodes[3], nodes[4]}, {nodes[5], nodes[4]}, {nodes[4], nodes[0]}}); @@ -314,12 +321,46 @@ SCENARIO("Test LexiRoute::solve") { LexiRoute lr0(shared_arc, mf); lr0.solve(20); + mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); lr1.solve(20); REQUIRE(circ.all_qubits()[1] == nodes[5]); } + GIVEN("Ancilla assignment, one valid Node, with merge.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::H, {qubits[3]}); + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4)}; + // just a ring + + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[4], nodes[0]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + std::map rename_map = { + {qubits[0], nodes[2]}, {qubits[1], nodes[4]}, {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + + std::shared_ptr mf = + std::make_shared(circ); + mf->ancilla_nodes_.insert(nodes[3]); + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr0(shared_arc, mf); + lr0.solve(20); + + REQUIRE(circ.all_qubits()[1] == nodes[4]); + REQUIRE(circ.all_qubits()[0] == nodes[3]); + } GIVEN( "Single best solution, with measurements and classically controlled " "gates.") { @@ -375,17 +416,17 @@ SCENARIO("Test LexiRoute::solve") { } GIVEN( "Labelling is required, but there are no free remaining qubits, for one " - "updated label, order 0.") { + "updated label, order 1.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[8], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[8]}); // n0 -- n1 -- n2 -- n3 -- n4 // | | // n5 n7 // | // n6 std::map rename_map = { - {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[0], nodes[0]}, {qubits[8], nodes[1]}, {qubits[2], nodes[2]}, {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; circ.rename_units(rename_map); From d9aa114fbc90d19037d2de75d36c96cf92d1df49 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 18 Jan 2022 13:07:27 +0000 Subject: [PATCH 015/146] Remove redundant comments --- tket/src/Mapping/MappingFrontier.cpp | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index aa130e4c54..f655d2d849 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -225,8 +225,6 @@ void MappingFrontier::advance_frontier_boundary( * convert_u_frontier_to_edges * Subcircuit requires EdgeVec, not unit_frontier_t as boundary information * Helper Functions to convert types - * TODO: also probably another way of doing this? EdgeVec required for - * subcircuit. Double check with someone who knows better than I... 
*/ EdgeVec convert_u_frontier_to_edges(const unit_frontier_t& u_frontier) { EdgeVec edges; @@ -304,8 +302,6 @@ void MappingFrontier::permute_subcircuit_q_out_hole( } for (const std::pair& pair : this->quantum_boundary->get()) { - // other iteration avoids this... - // TODO: change this when making route different subcircuits auto it = final_permutation.find(pair.first); if (it == final_permutation.end()) { throw MappingFrontierError("Qubit in boundary not in permutation."); @@ -352,17 +348,6 @@ void MappingFrontier::set_quantum_boundary( } } -// /** -// * add_qubit -// * Adds given UnitID as a qubit to held circuit. -// * Updates boundary. -// */ -// void MappingFrontier::add_qubit(const UnitID& uid) { -// Qubit qb(uid); -// this->circuit_.add_qubit(qb); -// this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); -// } - /** * add_swap * Inserts an OpType::SWAP gate into the uid_0 and uid_1 edges held in @@ -375,9 +360,6 @@ void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { auto uid1_in_it = this->quantum_boundary->find(uid_1); // Add Qubit if not in MappingFrontier boundary (i.e. not in circuit) - // If it so happens one of these is an ancilla, it works this out later... - // TODO: make it do that checking here ^^^^ - // implies that it is a new "ancilla" qubit if (uid0_in_it == this->quantum_boundary->end()) { this->add_ancilla(uid_0); uid0_in_it = this->quantum_boundary->find(uid_0); From eba680a7f1a6ebc936a6e71220f8a44d269f5287 Mon Sep 17 00:00:00 2001 From: yao-cqc <75305462+yao-cqc@users.noreply.github.com> Date: Tue, 18 Jan 2022 15:31:42 +0000 Subject: [PATCH 016/146] Feature/reorder multi qubit gates (#157) * Add token swapping stage, add test * Update compilation passes to use new routing * Add json serialization * Continue adding JSON serialisation for routing_config * Improve Json definitions * Update JSON Serialization and use of Barrier * Change from reference_wrapper to shared_ptr * Add JSON_DECL for std::vector * format routing_test * Fix up tests and binders for python * Uncoment measurement tests * rename method to merge_ancilla * debug proptest * Make add_qubit add qubit to unit_bimaps_ if not nullptr * Architectures -> Architecture * Install boost on MacOS. * comments to debug * update proptest to support ancillas properly * remove couts * format * Make Unitary dimensions match * add tket assert for comparison * Update test to check value * add_qubit -> add_ancilla * Remove kwargs formatting from argument * Rename Architecture Methods * rename architecture methods * Allow architecture mapping to take original edges, to calculate Node to size_t mapping * add get_square_grid_edges, to allow fixed tests independent of SquareGrid * use ArchitectureMapping and edges in most tests, instead of Architecture * trivial typos, comments, cmake update * add copyright notices, pragma once, remove semicolon typos * update binders for inheritance and docs * format * Remove NodeGraph * update formatting * Reorder CZ circuits * Revert "Reorder CZ circuits" This reverts commit 9d67720f2047fb6875c6c520f37fb28967b3752e. 
* Make two methods in MappingFrontier public * Add MultiGateReorderRoutingMethod * Remove unnecessary frontier advancement * Avoid copying the whole MappingFrontier * Remove the edge_in_frontier method * Add comment for vertex rewiring * Allow users to set search limits * Change default max depth/size limits to 10 * Fix using incorrect port colours * Obtain unitid by traversing to frontier instead of inputs * Add test for MultiGateReorderRoutingMethod * Refactor condition checks and rewire * Implement check_method * Add test for routing with LexiRoute Co-authored-by: sjdilkes Co-authored-by: Alec Edgington Co-authored-by: Zen Harper --- pytket/binders/passes.cpp | 1 - tket/src/CMakeLists.txt | 1 + tket/src/Mapping/MappingFrontier.cpp | 33 ++- tket/src/Mapping/MappingFrontier.hpp | 20 ++ tket/src/Mapping/MultiGateReorder.cpp | 272 +++++++++++++++++++ tket/src/Mapping/MultiGateReorder.hpp | 70 +++++ tket/tests/test_MultiGateReorder.cpp | 360 ++++++++++++++++++++++++++ tket/tests/tkettestsfiles.cmake | 1 + 8 files changed, 745 insertions(+), 13 deletions(-) create mode 100644 tket/src/Mapping/MultiGateReorder.cpp create mode 100644 tket/src/Mapping/MultiGateReorder.hpp create mode 100644 tket/tests/test_MultiGateReorder.cpp diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index 943f971b88..8206680c15 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -33,7 +33,6 @@ static PassPtr gen_cx_mapping_pass_kwargs( const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { RoutingMethodPtr method = std::make_shared(100); std::vector config = {method}; - if (kwargs.contains("config")) { config = py::cast>(kwargs["config"]); } diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index cbcff7158f..18eb9e5793 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -232,6 +232,7 @@ set(TKET_SOURCES ${TKET_MAPPING_DIR}/LexicographicalComparison.cpp ${TKET_MAPPING_DIR}/LexiRoute.cpp ${TKET_MAPPING_DIR}/RoutingMethodJson.cpp + ${TKET_MAPPING_DIR}/MultiGateReorder.cpp ${TKET_MAPPING_DIR}/Verification.cpp diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index aa130e4c54..84e792741a 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -20,11 +20,6 @@ UnitID get_unitid_from_unit_frontier( std::string("Edge provided not in unit_frontier_t object.")); } -/** - * quantum_boundary stored as vertport so that correct edge can be recovered - * after subcircuit substitution method uses Vertex and port_t and - * Circuit::get_nth_out_edge to generate unit_frontier_t object - */ std::shared_ptr frontier_convert_vertport_to_edge( const Circuit& circuit, const std::shared_ptr& u_frontier) { @@ -58,6 +53,27 @@ MappingFrontier::MappingFrontier(Circuit& _circuit) : circuit_(_circuit) { } } +MappingFrontier::MappingFrontier(const MappingFrontier& mapping_frontier) + : circuit_(mapping_frontier.circuit_) { + this->quantum_boundary = std::make_shared(); + this->classical_boundary = std::make_shared(); + for (const std::pair& pair : + mapping_frontier.quantum_boundary->get()) { + this->quantum_boundary->insert({pair.first, pair.second}); + } + for (const std::pair& pair : + mapping_frontier.classical_boundary->get()) { + EdgeVec edges; + for (const Edge& edge : pair.second) { + edges.push_back(edge); + } + this->classical_boundary->insert({pair.first, edges}); + } + for (const Node& node : mapping_frontier.ancilla_nodes_) { + this->ancilla_nodes_.insert(node); + } +} + 
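+// Note on the copy constructor above: the quantum/classical boundary maps and
+// the ancilla set are duplicated entry by entry, while circuit_ is a reference
+// member initialised from the other frontier's circuit_, so both frontiers
+// keep referring to the same underlying Circuit.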
void MappingFrontier::advance_next_2qb_slice(unsigned max_advance) { bool boundary_updated = false; unsigned loop = 0; @@ -221,13 +237,6 @@ void MappingFrontier::advance_frontier_boundary( return; } -/** - * convert_u_frontier_to_edges - * Subcircuit requires EdgeVec, not unit_frontier_t as boundary information - * Helper Functions to convert types - * TODO: also probably another way of doing this? EdgeVec required for - * subcircuit. Double check with someone who knows better than I... - */ EdgeVec convert_u_frontier_to_edges(const unit_frontier_t& u_frontier) { EdgeVec edges; for (const std::pair& pair : u_frontier.get()) { diff --git a/tket/src/Mapping/MappingFrontier.hpp b/tket/src/Mapping/MappingFrontier.hpp index f1eb102b72..afc4128e58 100644 --- a/tket/src/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/MappingFrontier.hpp @@ -17,6 +17,23 @@ class MappingFrontierError : public std::logic_error { : std::logic_error(message) {} }; +/** + * quantum_boundary stored as vertport so that correct edge can be recovered + * after subcircuit substitution method uses Vertex and port_t and + * Circuit::get_nth_out_edge to generate unit_frontier_t object + */ +std::shared_ptr frontier_convert_vertport_to_edge( + const Circuit& circuit, + const std::shared_ptr& u_frontier); + +/** + * convert_u_frontier_to_edges + * Subcircuit requires EdgeVec, not unit_frontier_t as boundary information + * Helper Functions to convert types + * TODO: also probably another way of doing this? EdgeVec required for + * subcircuit. Double check with someone who knows better than I... + */ +EdgeVec convert_u_frontier_to_edges(const unit_frontier_t& u_frontier); struct MappingFrontier { /** * VertPort instead of Edge as Edge changes in substitution, but Vertex and @@ -36,6 +53,9 @@ struct MappingFrontier { MappingFrontier(Circuit& _circuit); + // copy constructor + MappingFrontier(const MappingFrontier& mapping_frontier); + /** * Given some Circuit Cut (or routed/unrouted boundary), advances the cut to * the next cut of just two-qubit vertices, not including the current diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp new file mode 100644 index 0000000000..a7216a2b3a --- /dev/null +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -0,0 +1,272 @@ +#include "Mapping/MultiGateReorder.hpp" + +#include "Mapping/MappingFrontier.hpp" + +namespace tket { + +MultiGateReorder::MultiGateReorder( + const ArchitecturePtr &_architecture, + std::shared_ptr &_mapping_frontier) + : architecture_(_architecture), mapping_frontier_(_mapping_frontier) { + // This needs to be updated every time the frontier changes + this->u_frontier_edges_ = + convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( + _mapping_frontier->circuit_, _mapping_frontier->quantum_boundary)); +} + +// Traverse the DAG to the quantum frontier encoded in q_boundary_map +// to find the UnitID associated with an VertPort +UnitID get_unitid_from_vertex_port( + const Circuit &circ, const VertPort &vert_port, + const std::map &q_boundary_map) { + VertPort current_vert_port = vert_port; + while (true) { + auto it = q_boundary_map.find(current_vert_port); + if (it != q_boundary_map.end()) { + return it->second; + } + Edge current_e = circ.get_nth_out_edge( + current_vert_port.first, current_vert_port.second); + Vertex prev_vert; + Edge prev_e; + std::tie(prev_vert, prev_e) = + circ.get_prev_pair(current_vert_port.first, current_e); + current_vert_port = {prev_vert, circ.get_source_port(prev_e)}; + } +} + +bool 
is_multiq_quantum_gate(const Circuit &circ, const Vertex &vert) { + Op_ptr op = circ.get_Op_ptr_from_Vertex(vert); + return ( + op->get_desc().is_gate() && circ.n_in_edges(vert) > 1 && + circ.n_in_edges_of_type(vert, EdgeType::Quantum) == + circ.n_in_edges(vert) && + circ.n_out_edges_of_type(vert, EdgeType::Quantum) == + circ.n_out_edges(vert)); +} + +bool is_physically_permitted( + const Circuit &circ, const ArchitecturePtr &arc_ptr, const Vertex &vert, + const std::map &q_boundary_map) { + std::vector nodes; + for (port_t port = 0; port < circ.n_ports(vert); ++port) { + nodes.push_back( + Node(get_unitid_from_vertex_port(circ, {vert, port}, q_boundary_map))); + } + + return arc_ptr->valid_operation(nodes); +} + +// This method will try to commute a vertex to the quantum frontier +std::optional> try_find_commute_edges( + const Circuit &circ, const EdgeVec &frontier_edges, const Vertex &vert) { + // Initialize to be the in_edges for the given vertex + EdgeVec current_edges = circ.get_in_edges(vert); + EdgeVec initial_edges(current_edges.begin(), current_edges.end()); + + Op_ptr current_op = circ.get_Op_ptr_from_Vertex(vert); + // Record the colour of each port of the vertex. + std::vector> colours; + for (const Edge &edge : current_edges) { + port_t target_port = circ.get_target_port(edge); + std::optional colour = current_op->commuting_basis(target_port); + colours.push_back(colour); + } + // Stores all edges which the vertex can be commuted to + EdgeVec dest_edges; + while (true) { + // The vertex can be commuted to the front + bool success = true; + for (unsigned i = 0; i < current_edges.size(); ++i) { + // Check if the edge is already in the quantum frontier + if (std::find( + frontier_edges.begin(), frontier_edges.end(), current_edges[i]) != + frontier_edges.end()) { + dest_edges.push_back(current_edges[i]); + continue; + } + // Check prev_op is a gate + Vertex prev_vert = circ.source(current_edges[i]); + Op_ptr prev_op = circ.get_Op_ptr_from_Vertex(prev_vert); + if (!prev_op->get_desc().is_gate()) { + // not commute + return std::nullopt; + } + + // Check commute + port_t source_port = circ.get_source_port(current_edges[i]); + if (!prev_op->commutes_with_basis(colours[i], source_port)) { + // not commute + return std::nullopt; + } else { + // Update dest_edges + Vertex prev_prev_v; + Edge prev_e; + std::tie(prev_prev_v, prev_e) = + circ.get_prev_pair(prev_vert, current_edges[i]); + dest_edges.push_back(prev_e); + } + // Only true if all edges are in frontier + success = false; + } + if (success) { + std::pair p(initial_edges, dest_edges); + return p; + } else { + current_edges = dest_edges; + dest_edges = {}; + } + } +} + +void partial_rewire( + const Vertex &vert, Circuit &circ, EdgeVec &src_edges, + EdgeVec &dest_edges) { + // move the vertex to the frontier + // Notice that if one of the vertex's in edge is already a destination + // edge then the circuit::remove_vertex will delete the destination edge + // hence circuit::rewire would result in an error due to the missing edge. + // We need a partial rewire for that reason. + // Example: + // Moving the second vertex (CX gate) to the front we only need to rewire + // the "x" part. + // --o----- + // | + // --x--x-- + // | + // -----o-- + + for (unsigned i = 0; i < dest_edges.size(); i++) { + Edge &dest_in_edge = dest_edges[i]; + Edge &curr_in_edge = src_edges[i]; + // If the vertex is already connected to an edge in the frontier, do + // nothing. 
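+    // Otherwise splice the vertex into the destination position with three
+    // new edges: (1) source of the destination edge -> this vertex's in-port,
+    // (2) this vertex's matching out-port -> target of the destination edge,
+    // (3) the vertex's old predecessor -> its old successor, bridging the gap
+    // left behind. The three superseded edges are then removed.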
+ if (dest_in_edge != curr_in_edge) { + // Add first edge + Vertex dest_prev_vert = circ.source(dest_in_edge); + circ.add_edge( + {dest_prev_vert, circ.get_source_port(dest_in_edge)}, + {vert, circ.get_target_port(curr_in_edge)}, EdgeType::Quantum); + // Add second edge + Vertex curr_next_vert; + Edge curr_out_edge; + Vertex dest_next_vert = circ.target(dest_in_edge); + std::tie(curr_next_vert, curr_out_edge) = + circ.get_next_pair(vert, curr_in_edge); + circ.add_edge( + {vert, circ.get_source_port(curr_out_edge)}, + {dest_next_vert, circ.get_target_port(dest_in_edge)}, + EdgeType::Quantum); + // Add third edge + Vertex curr_prev_vert = circ.source(curr_in_edge); + circ.add_edge( + {curr_prev_vert, circ.get_source_port(curr_in_edge)}, + {curr_next_vert, circ.get_target_port(curr_out_edge)}, + EdgeType::Quantum); + // Remove edges + circ.remove_edge(dest_in_edge); + circ.remove_edge(curr_in_edge); + circ.remove_edge(curr_out_edge); + } + } +} + +void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { + // Assume the frontier has been advanced + + // store a copy of the original this->mapping_frontier_->quantum_boundray + // this object will be updated and reset throughout the procedure + // so need to return it to original setting at end + // also create a map for getting UnitID from VertPort + std::map q_boundary_map; + unit_vertport_frontier_t copy; + for (const std::pair &pair : + this->mapping_frontier_->quantum_boundary->get()) { + copy.insert({pair.first, pair.second}); + q_boundary_map.insert({pair.second, pair.first}); + } + // Get a subcircuit only for iterating vertices + Subcircuit circ = + this->mapping_frontier_->get_frontier_subcircuit(max_depth, max_size); + // since we assume that the frontier has been advanced + // we are certain that any multi-q vert lies after the frontier + for (const Vertex &vert : circ.verts) { + // Check if the vertex is: + // 1. physically permitted + // 2. 
is a multi qubit quantum operation without classical controls + if (is_multiq_quantum_gate(this->mapping_frontier_->circuit_, vert) && + is_physically_permitted( + this->mapping_frontier_->circuit_, this->architecture_, vert, + q_boundary_map)) { + std::optional> commute_pairs = + try_find_commute_edges( + this->mapping_frontier_->circuit_, this->u_frontier_edges_, vert); + + if (commute_pairs != std::nullopt) { + partial_rewire( + vert, this->mapping_frontier_->circuit_, (*commute_pairs).first, + (*commute_pairs).second); + // Update the frontier + this->mapping_frontier_->advance_frontier_boundary(this->architecture_); + this->u_frontier_edges_ = + convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->quantum_boundary)); + // Update the map + q_boundary_map.clear(); + for (const std::pair &pair : + this->mapping_frontier_->quantum_boundary->get()) { + q_boundary_map.insert({pair.second, pair.first}); + } + } + } + } + // Return the quantum boundary to its original setting + this->mapping_frontier_->set_quantum_boundary(copy); +} + +MultiGateReorderRoutingMethod::MultiGateReorderRoutingMethod( + unsigned _max_depth, unsigned _max_size) + : max_depth_(_max_depth), max_size_(_max_size){}; + +bool MultiGateReorderRoutingMethod::check_method( + const std::shared_ptr &mapping_frontier, + const ArchitecturePtr &architecture) const { + const EdgeVec u_frontier_edges = + convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->quantum_boundary)); + std::map q_boundary_map; + for (const std::pair &pair : + mapping_frontier->quantum_boundary->get()) { + q_boundary_map.insert({pair.second, pair.first}); + } + + Subcircuit circ = mapping_frontier->get_frontier_subcircuit( + this->max_depth_, this->max_size_); + // since we assume that the frontier has been advanced + // we are certain that any multi-q vert lies after the frontier + for (const Vertex &vert : circ.verts) { + if (is_multiq_quantum_gate(mapping_frontier->circuit_, vert) && + is_physically_permitted( + mapping_frontier->circuit_, architecture, vert, q_boundary_map)) { + std::optional> commute_pairs = + try_find_commute_edges( + mapping_frontier->circuit_, u_frontier_edges, vert); + + if (commute_pairs != std::nullopt) { + return true; + } + } + } + return false; +} + +unit_map_t MultiGateReorderRoutingMethod::routing_method( + std::shared_ptr &mapping_frontier, + const ArchitecturePtr &architecture) const { + MultiGateReorder mr(architecture, mapping_frontier); + mr.solve(this->max_depth_, this->max_size_); + return {}; +} + +} // namespace tket diff --git a/tket/src/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/MultiGateReorder.hpp new file mode 100644 index 0000000000..631d2b012b --- /dev/null +++ b/tket/src/Mapping/MultiGateReorder.hpp @@ -0,0 +1,70 @@ +#ifndef _TKET_MultiGateReorder_H_ +#define _TKET_MultiGateReorder_H_ + +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class MultiGateReorder { + public: + /** + * Class Constructor + * @param _architecture Architecture object added operations must respect + * @param _mapping_frontier Contains Circuit object to be modified + */ + MultiGateReorder( + const ArchitecturePtr& _architecture, + std::shared_ptr& _mapping_frontier); + + /** + * Try to commute any multi-qubit gates to the quantum frontier + * @param max_depth Maximum number of layers of gates checked for commutation. 
+ * @param max_size Maximum number of gates checked for commutation. + */ + void solve(unsigned max_depth, unsigned max_size); + + private: + // Architecture all new physical operations must respect + ArchitecturePtr architecture_; + std::shared_ptr mapping_frontier_; + EdgeVec u_frontier_edges_; +}; + +class MultiGateReorderRoutingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods redefined using MultiGateReorder. + * @param _max_depth Maximum number of layers of gates checked for + * commutation. + * @param _max_size Maximum number of gates checked for commutation. + */ + MultiGateReorderRoutingMethod( + unsigned _max_depth = 10, unsigned _max_size = 10); + + /** + * @return true if method can route subcircuit, false if not + */ + bool check_method( + const std::shared_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const; + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. + * + */ + unit_map_t routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const; + + private: + unsigned max_depth_; + unsigned max_size_; +}; + +} // namespace tket + +#endif \ No newline at end of file diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp new file mode 100644 index 0000000000..75621ffa3f --- /dev/null +++ b/tket/tests/test_MultiGateReorder.cpp @@ -0,0 +1,360 @@ +#include + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/MultiGateReorder.hpp" +#include "Predicates/Predicates.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" + +namespace tket { +SCENARIO("Reorder circuits") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Simple CZ circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + std::shared_ptr mf = + std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 2; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(shared_arc->valid_operation(nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } + + GIVEN("Simple CZ circuit 2.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[1], qubits[0]}); + // Physically invalid 
operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[3], qubits[0]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[3], qubits[2]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + std::shared_ptr mf = + std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 4; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(shared_arc->valid_operation(nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } + GIVEN("Simple CZ circuit with single_qs.") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[1], qubits[0]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::Rz, 0.5, {qubits[0]}); + circ.add_op(OpType::Rz, 0.5, {qubits[2]}); + circ.add_op(OpType::Rz, 0.5, {qubits[3]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::Measure, {qubits[2], Bit(0)}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[3], qubits[0]}); + // Physically valid operations + circ.add_op(OpType::H, {qubits[3]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[2]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + + std::shared_ptr mf = + std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 2; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(shared_arc->valid_operation(nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } + + GIVEN("Circuit with multi qubit gates.") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[1], qubits[0]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + // Physically valid operations + circ.add_op(OpType::CCX, {qubits[1], qubits[2], qubits[3]}); + circ.add_op(OpType::Rx, 0.5, {qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[3]}); + circ.add_op(OpType::Rz, 0.5, {qubits[0]}); + circ.add_op(OpType::CRz, 0.5, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.2, {qubits[0], qubits[1]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[3], 
qubits[0]}); + // Physically valid operations + circ.add_op(OpType::H, {qubits[3]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[2]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + + std::shared_ptr mf = + std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 6; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(shared_arc->valid_operation(nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } +} + +SCENARIO("Reorder circuits with limited search space") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Simple CZ circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + std::shared_ptr mf = + std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(3, 3); + // Check only the first valid CZ get commuted to the front + std::vector commands = circ.get_commands(); + REQUIRE(shared_arc->valid_operation( + {Node(commands[0].get_args()[0]), Node(commands[0].get_args()[1])})); + REQUIRE(!shared_arc->valid_operation( + {Node(commands[1].get_args()[0]), Node(commands[1].get_args()[1])})); + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } +} + +SCENARIO("Test MultiGateReorderRoutingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Simple CZ circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], 
nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + std::shared_ptr mf = + std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorderRoutingMethod mrrm; + REQUIRE(mrrm.check_method(mf, shared_arc)); + + unit_map_t init_map = mrrm.routing_method(mf, shared_arc); + REQUIRE(init_map.size() == 0); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 5; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(shared_arc->valid_operation(nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + + // Test with limits + Circuit circ2(circ_copy); + + std::shared_ptr mf2 = + std::make_shared(circ2); + mf2->advance_frontier_boundary(shared_arc); + MultiGateReorderRoutingMethod mrrm2(4, 4); + REQUIRE(mrrm2.check_method(mf2, shared_arc)); + + unit_map_t init_map2 = mrrm2.routing_method(mf2, shared_arc); + REQUIRE(init_map2.size() == 0); + std::vector commands2 = circ2.get_commands(); + for (unsigned i = 0; i < 4; i++) { + std::vector nodes; + for (auto arg : commands2[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(shared_arc->valid_operation(nodes)); + } + std::vector nodes; + for (auto arg : commands2[4].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(!shared_arc->valid_operation(nodes)); + const auto u2 = tket_sim::get_unitary(circ2); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u2, u1, tket_sim::MatrixEquivalence::EQUAL)); + } +} + +SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + GIVEN("Simple CZ, CX circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + + // Physically invalid operations + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + MappingManager mm(shared_arc); + // MultiGateReorderRoutingMethod should first commute the last two gates + // then only one swap is needed. 
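+  // The routing methods are tried in the order given: gate reordering is
+  // attempted first at each routing step, with LexiRouteRoutingMethod(10) as
+  // the fallback that inserts SWAP gates once no gate can be commuted to the
+  // frontier.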
+ std::vector vrm = { + std::make_shared(), + std::make_shared(10)}; + bool res = mm.route_circuit(circ, vrm); + PredicatePtr routed_correctly = + std::make_shared(architecture); + REQUIRE(routed_correctly->verify(circ)); + REQUIRE(circ.count_gates(OpType::SWAP) == 1); + std::vector commands = circ.get_commands(); + REQUIRE(commands.size() == 5); + Command swap_c = commands[2]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 65835b6484..754e86afe5 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -93,6 +93,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_MappingManager.cpp ${TKET_TESTS_DIR}/test_LexicographicalComparison.cpp ${TKET_TESTS_DIR}/test_LexiRoute.cpp + ${TKET_TESTS_DIR}/test_MultiGateReorder.cpp ${TKET_TESTS_DIR}/test_DeviceCharacterisation.cpp ${TKET_TESTS_DIR}/test_Clifford.cpp ${TKET_TESTS_DIR}/test_MeasurementSetup.cpp From c9cf8b224042c236071dfcdf67f5adb3f2849c0e Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Wed, 19 Jan 2022 15:53:31 +0000 Subject: [PATCH 017/146] modify TKET_ASSERT: allow extra messages, catch exceptions in evaluation --- tket/src/CMakeLists.txt | 1 + tket/src/Utils/Assert.hpp | 67 +++++++++++++++++++++++++++----- tket/src/Utils/AssertMessage.cpp | 34 ++++++++++++++++ tket/src/Utils/AssertMessage.hpp | 56 ++++++++++++++++++++++++++ 4 files changed, 148 insertions(+), 10 deletions(-) create mode 100644 tket/src/Utils/AssertMessage.cpp create mode 100644 tket/src/Utils/AssertMessage.hpp diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index 18eb9e5793..cedd45ee63 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -242,6 +242,7 @@ set(TKET_SOURCES ${TKET_AAS_DIR}/SteinerForest.cpp # Utils + ${TKET_UTILS_DIR}/AssertMessage.cpp ${TKET_UTILS_DIR}/TketLog.cpp ${TKET_UTILS_DIR}/UnitID.cpp ${TKET_UTILS_DIR}/HelperFunctions.cpp diff --git a/tket/src/Utils/Assert.hpp b/tket/src/Utils/Assert.hpp index c771e9dff5..6a80eadb83 100644 --- a/tket/src/Utils/Assert.hpp +++ b/tket/src/Utils/Assert.hpp @@ -17,18 +17,65 @@ #include #include +#include "AssertMessage.hpp" #include "TketLog.hpp" /** * If the condition `b` is not satisfied, log a diagnostic message and abort. 
+ * But note that the message includes only the raw C++ source code for b, + * not the actual values of x,y in conditions like "xcritical(msg.str()); \ - std::abort(); \ - } \ - } while (0) +#define TKET_ASSERT(b) \ + /* GCOVR_EXCL_START */ \ + do { \ + try { \ + if (!(b)) { \ + std::stringstream msg; \ + msg << "Assertion '" << #b << "' (" << __FILE__ << " : " << __func__ \ + << " : " << __LINE__ << ") failed: aborting."; \ + tket::tket_log()->critical(msg.str()); \ + std::abort(); \ + } \ + } catch (const AssertMessage::MessageData& e1) { \ + std::stringstream msg; \ + msg << "Assertion "; \ + if (e1.verbose) { \ + msg << "'" << #b << "' "; \ + } \ + msg << "(" << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") failed: '" << e1.what() << "': aborting."; \ + tket::tket_log()->critical(msg.str()); \ + std::abort(); \ + } catch (const std::exception& e2) { \ + std::stringstream msg; \ + msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ + << " : " << __func__ << " : " << __LINE__ \ + << ") threw unexpected exception: '" << e2.what() << "': aborting."; \ + tket::tket_log()->critical(msg.str()); \ + std::abort(); \ + } catch (...) { \ + std::stringstream msg; \ + msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ + << " : " << __func__ << " : " << __LINE__ \ + << ") threw unknown exception. Aborting."; \ + tket::tket_log()->critical(msg.str()); \ + std::abort(); \ + } \ + } while (0) /* GCOVR_EXCL_STOP */ diff --git a/tket/src/Utils/AssertMessage.cpp b/tket/src/Utils/AssertMessage.cpp new file mode 100644 index 0000000000..6746810018 --- /dev/null +++ b/tket/src/Utils/AssertMessage.cpp @@ -0,0 +1,34 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "AssertMessage.hpp" + +namespace tket { + +AssertMessage::AssertMessage() : m_verbose(false) {} + +AssertMessage AssertMessage::verbose() { + AssertMessage message; + message.m_verbose = true; + return message; +} + +AssertMessage::MessageData::MessageData(const std::string& str, bool vbose) + : std::runtime_error(str), verbose(vbose) {} + +AssertMessage::operator bool() const { + throw MessageData(m_ss.str(), m_verbose); +} + +} // namespace tket diff --git a/tket/src/Utils/AssertMessage.hpp b/tket/src/Utils/AssertMessage.hpp new file mode 100644 index 0000000000..1bc40d46e6 --- /dev/null +++ b/tket/src/Utils/AssertMessage.hpp @@ -0,0 +1,56 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +namespace tket { + +/** This is for use with TKET_ASSERT, when we want to give a more detailed + * error message than just the assertion code and location. + */ +class AssertMessage { + public: + /** Construct the object (the default non-verbose version) to begin writing to + * the stream. */ + AssertMessage(); + + /** Get a verbose object (not the default). */ + static AssertMessage verbose(); + + /** Thrown when the message construction is finished, to store the necessary + * data. */ + struct MessageData : public std::runtime_error { + bool verbose; + MessageData(const std::string& str, bool verbose); + }; + + /** Throws a MessageData object when called, with the message. */ + operator bool() const; + + /** Every streamable object can be written to the stream. */ + template + AssertMessage& operator<<(const T& x) { + m_ss << x; + return *this; + } + + private: + bool m_verbose; + std::stringstream m_ss; +}; + +} // namespace tket From b2b068033dc02d1ac5d891374439467dd70819f9 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Wed, 19 Jan 2022 15:59:42 +0000 Subject: [PATCH 018/146] replace some exceptions with tket asserts; should be ignored by test coverage --- tket/src/Graphs/AdjacencyData.cpp | 89 +++++++------------ tket/src/Graphs/BruteForceColouring.cpp | 14 +-- tket/src/Simulation/BitOperations.cpp | 6 +- .../src/TokenSwapping/ArchitectureMapping.cpp | 63 +++++++------ tket/src/TokenSwapping/BestFullTsa.cpp | 3 +- tket/src/TokenSwapping/BestFullTsa.hpp | 2 +- 6 files changed, 77 insertions(+), 100 deletions(-) diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index c612b4ed69..6fae9f3699 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -19,6 +19,8 @@ #include #include +#include "Utils/Assert.hpp" + using std::exception; using std::map; using std::runtime_error; @@ -63,13 +65,12 @@ string AdjacencyData::to_string() const { const set& AdjacencyData::get_neighbours( std::size_t vertex) const { - if (vertex >= m_cleaned_data.size()) { - stringstream ss; - ss << "AdjacencyData: get_neighbours called with invalid vertex " << vertex - << "; there are only " << m_cleaned_data.size() << " vertices"; - - throw runtime_error(ss.str()); - } + TKET_ASSERT( + vertex < m_cleaned_data.size() || + AssertMessage() + << "AdjacencyData: get_neighbours called with invalid vertex " + << vertex << "; there are only " << m_cleaned_data.size() + << " vertices"); return m_cleaned_data[vertex]; } @@ -90,28 +91,21 @@ std::size_t AdjacencyData::get_number_of_edges() const { } bool AdjacencyData::add_edge(std::size_t i, std::size_t j) { - try { - const bool exists = edge_exists(i, j); - if (exists) { - return false; - } - m_cleaned_data[i].insert(j); - m_cleaned_data[j].insert(i); - return true; - } catch (const exception& e) { - stringstream ss; - ss << "add_edge : " << e.what(); - throw runtime_error(ss.str()); + const bool exists = edge_exists(i, j); + if (exists) { + return false; } + m_cleaned_data[i].insert(j); + m_cleaned_data[j].insert(i); + return true; } bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { - if (i >= m_cleaned_data.size() || j >= m_cleaned_data.size()) { - stringstream ss; - ss << "AdjacencyData: edge_exists called with vertices " << i << ", " << j - << ", but there are only " << m_cleaned_data.size() << " vertices"; - throw runtime_error(ss.str()); - 
} + TKET_ASSERT( + (i < m_cleaned_data.size() && j < m_cleaned_data.size()) || + AssertMessage() << "edge_exists called with vertices " << i << ", " << j + << ", but there are only " << m_cleaned_data.size() + << " vertices"); return m_cleaned_data[i].count(j) != 0; } @@ -136,16 +130,10 @@ AdjacencyData::AdjacencyData( } } m_cleaned_data.resize(number_of_vertices); - try { - for (const auto& entry : raw_data) { - for (std::size_t neighbour : entry.second) { - add_edge(entry.first, neighbour); - } + for (const auto& entry : raw_data) { + for (std::size_t neighbour : entry.second) { + add_edge(entry.first, neighbour); } - } catch (const exception& e) { - stringstream ss; - ss << "AdjacencyData: constructing from map:" << e.what(); - throw runtime_error(ss.str()); } } @@ -153,28 +141,19 @@ AdjacencyData::AdjacencyData( const vector>& raw_data, bool allow_loops) { m_cleaned_data.resize(raw_data.size()); - try { - for (std::size_t i = 0; i < raw_data.size(); ++i) { - for (std::size_t j : raw_data[i]) { - if (i == j && !allow_loops) { - stringstream ss; - ss << "vertex " << i << " has a loop."; - throw runtime_error(ss.str()); - } - if (j > raw_data.size()) { - stringstream ss; - ss << "vertex " << i << " has illegal neighbour vertex " << j; - throw runtime_error(ss.str()); - } - m_cleaned_data[i].insert(j); - m_cleaned_data[j].insert(i); - } + for (std::size_t i = 0; i < m_cleaned_data.size(); ++i) { + for (std::size_t j : raw_data[i]) { + TKET_ASSERT( + i != j || allow_loops || + AssertMessage() << "vertex " << i << " out of " + << m_cleaned_data.size() << " has a loop."); + TKET_ASSERT( + j < m_cleaned_data.size() || + AssertMessage() << "vertex " << i << " has illegal neighbour vertex " + << j << ", the size is " << m_cleaned_data.size()); + m_cleaned_data[i].insert(j); + m_cleaned_data[j].insert(i); } - } catch (const exception& e) { - stringstream ss; - ss << "AdjacencyData: we have " << raw_data.size() - << " vertices: " << e.what(); - throw runtime_error(ss.str()); } } diff --git a/tket/src/Graphs/BruteForceColouring.cpp b/tket/src/Graphs/BruteForceColouring.cpp index 87ac8d8b66..aa2ac22ad8 100644 --- a/tket/src/Graphs/BruteForceColouring.cpp +++ b/tket/src/Graphs/BruteForceColouring.cpp @@ -215,13 +215,13 @@ BruteForceColouring::BruteForceColouring( } throw std::runtime_error("suggested_number_of_colours hit number_of_nodes"); } catch (const std::exception& e) { - std::stringstream ss; - ss << "BruteForceColouring: initial_suggested_number_of_colours = " - << initial_suggested_number_of_colours - << ", reached suggested_number_of_colours = " - << suggested_number_of_colours << ", had " << number_of_nodes - << " nodes. Error: " << e.what() << priority.print_raw_data(); - throw std::runtime_error(ss.str()); + TKET_ASSERT( + AssertMessage() << "initial_suggested_number_of_colours = " + << initial_suggested_number_of_colours + << ", reached suggested_number_of_colours = " + << suggested_number_of_colours << ", had " + << number_of_nodes << " nodes. 
Error: " << e.what() + << priority.print_raw_data()); } } diff --git a/tket/src/Simulation/BitOperations.cpp b/tket/src/Simulation/BitOperations.cpp index 6d4cf9d223..4cae118f33 100644 --- a/tket/src/Simulation/BitOperations.cpp +++ b/tket/src/Simulation/BitOperations.cpp @@ -16,6 +16,8 @@ #include +#include "Utils/Assert.hpp" + namespace tket { namespace tket_sim { namespace internal { @@ -44,9 +46,7 @@ ExpansionData get_expansion_data( auto test_bit = next_bit; for (unsigned left_shift_arg = 0;; ++left_shift_arg) { if ((test_bit & forbidden_bits) == 0) { - if (test_bit == 0) { - throw std::runtime_error("Ran out of bits"); - } + TKET_ASSERT(test_bit != 0); // A free space has been found. push_back(result, next_bit, left_shift_arg); forbidden_bits |= test_bit; diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 106d581854..4fa444a17c 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -17,6 +17,8 @@ #include #include +#include "Utils/Assert.hpp" + namespace tket { namespace tsa_internal { @@ -32,12 +34,10 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) const auto& node = m_vertex_to_node_mapping[ii]; { const auto citer = m_node_to_vertex_mapping.find(node); - if (citer != m_node_to_vertex_mapping.cend()) { - std::stringstream ss; - ss << "Duplicate node " << node.repr() << " at vertices " - << citer->second << ", " << ii; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT( + citer == m_node_to_vertex_mapping.cend() || + AssertMessage() << "Duplicate node " << node.repr() << " at vertices " + << citer->second << ", " << ii); } m_node_to_vertex_mapping[node] = ii; } @@ -68,24 +68,22 @@ ArchitectureMapping::ArchitectureMapping( // Check that the nodes agree with the architecture object. 
const auto uids = arch.nodes(); - if (uids.size() != m_vertex_to_node_mapping.size()) { - std::stringstream ss; - ss << "ArchitectureMapping: passed in " << edges.size() << " edges, giving " - << m_vertex_to_node_mapping.size() - << " vertices; but the architecture object has " << uids.size() - << " vertices"; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT( + uids.size() == m_vertex_to_node_mapping.size() || + AssertMessage() << "passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has " + << uids.size() << " vertices"); + for (const UnitID& uid : uids) { const Node node(uid); - if (m_node_to_vertex_mapping.count(node) == 0) { - std::stringstream ss; - ss << "ArchitectureMapping: passed in " << edges.size() - << " edges, giving " << m_vertex_to_node_mapping.size() - << " vertices; but the architecture object has an unknown node " - << node.repr(); - throw std::runtime_error(ss.str()); - } + TKET_ASSERT( + m_node_to_vertex_mapping.count(node) != 0 || + AssertMessage() + << "passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has an unknown node " + << node.repr()); } } @@ -95,22 +93,21 @@ size_t ArchitectureMapping::number_of_vertices() const { const Node& ArchitectureMapping::get_node(size_t vertex) const { const auto num_vertices = number_of_vertices(); - if (vertex >= num_vertices) { - std::stringstream ss; - ss << "get_node: invalid vertex " << vertex << " (architecture only has " - << num_vertices << " vertices)"; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT( + vertex < num_vertices || AssertMessage() + << "get_node: invalid vertex " << vertex + << " (architecture only has " << num_vertices + << " vertices)"); + return m_vertex_to_node_mapping[vertex]; } size_t ArchitectureMapping::get_vertex(const Node& node) const { const auto citer = m_node_to_vertex_mapping.find(node); - if (citer == m_node_to_vertex_mapping.cend()) { - std::stringstream ss; - ss << "get_vertex: node " << node.repr() << " has no vertex number"; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT( + citer != m_node_to_vertex_mapping.cend() || + AssertMessage() << "get_vertex: node " << node.repr() + << " has no vertex number"); return citer->second; } diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp index 517cd48d19..9aa9dd3052 100644 --- a/tket/src/TokenSwapping/BestFullTsa.cpp +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -24,7 +24,8 @@ namespace tsa_internal { BestFullTsa::BestFullTsa() { m_name = "BestFullTsa"; } -HybridTsa00& BestFullTsa::get_hybrid_tsa_for_testing() { return m_hybrid_tsa; } +// HybridTsa00& BestFullTsa::get_hybrid_tsa_for_testing() { return m_hybrid_tsa; +// } void BestFullTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, diff --git a/tket/src/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/BestFullTsa.hpp index d474fb898d..ef1472d531 100644 --- a/tket/src/TokenSwapping/BestFullTsa.hpp +++ b/tket/src/TokenSwapping/BestFullTsa.hpp @@ -68,7 +68,7 @@ class BestFullTsa : public PartialTsaInterface { * function may be deleted later! * @return Reference to the internal stored TSA object. 
*/ - HybridTsa00& get_hybrid_tsa_for_testing(); + // HybridTsa00& get_hybrid_tsa_for_testing(); private: HybridTsa00 m_hybrid_tsa; From 05c39085e223d413424fd10102a292560a879129 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Thu, 20 Jan 2022 17:03:12 +0000 Subject: [PATCH 019/146] Start moving files for modularisation --- tket/src/Mapping/CMakeLists.txt | 52 +++++++++++++++++++ tket/src/Mapping/{ => include}/LexiRoute.hpp | 0 .../LexicographicalComparison.hpp | 0 .../Mapping/{ => include}/MappingFrontier.hpp | 0 .../Mapping/{ => include}/MappingManager.hpp | 0 .../{ => include}/MultiGateReorder.hpp | 0 .../Mapping/{ => include}/RoutingMethod.hpp | 0 .../{ => include}/RoutingMethodCircuit.hpp | 0 .../{ => include}/RoutingMethodJson.hpp | 0 .../Mapping/{ => include}/Verification.hpp | 0 tket/src/Placement/CMakeLists.txt | 9 +--- .../src/Placement/{ => include}/Placement.hpp | 0 12 files changed, 54 insertions(+), 7 deletions(-) create mode 100644 tket/src/Mapping/CMakeLists.txt rename tket/src/Mapping/{ => include}/LexiRoute.hpp (100%) rename tket/src/Mapping/{ => include}/LexicographicalComparison.hpp (100%) rename tket/src/Mapping/{ => include}/MappingFrontier.hpp (100%) rename tket/src/Mapping/{ => include}/MappingManager.hpp (100%) rename tket/src/Mapping/{ => include}/MultiGateReorder.hpp (100%) rename tket/src/Mapping/{ => include}/RoutingMethod.hpp (100%) rename tket/src/Mapping/{ => include}/RoutingMethodCircuit.hpp (100%) rename tket/src/Mapping/{ => include}/RoutingMethodJson.hpp (100%) rename tket/src/Mapping/{ => include}/Verification.hpp (100%) rename tket/src/Placement/{ => include}/Placement.hpp (100%) diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt new file mode 100644 index 0000000000..51832aee29 --- /dev/null +++ b/tket/src/Mapping/CMakeLists.txt @@ -0,0 +1,52 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project(tket-${COMP}) + +if (NOT ${COMP} STREQUAL "Mapping") + message(FATAL_ERROR "Unexpected component name.") +endif() + +add_library(tket-${COMP} + LexicographicalComparison.cpp + LexiRoute.cpp + MappingFrontier.cpp + MappingManager.cpp + MultiGateReorder.cpp + RoutingMethodCircuit.cpp + RoutingMethodJson.cpp + Verification.cpp) + +list(APPEND DEPS_${COMP} + Gate + Ops + OpType + Utils + Circuit + Architecture) + +foreach(DEP ${DEPS_${COMP}}) + target_include_directories( + tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) + target_link_libraries( + tket-${COMP} PRIVATE tket-${DEP}) +endforeach() + +target_include_directories(tket-${COMP} + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${TKET_${COMP}_INCLUDE_DIR} + ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) + +target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS}) diff --git a/tket/src/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/LexiRoute.hpp similarity index 100% rename from tket/src/Mapping/LexiRoute.hpp rename to tket/src/Mapping/include/LexiRoute.hpp diff --git a/tket/src/Mapping/LexicographicalComparison.hpp b/tket/src/Mapping/include/LexicographicalComparison.hpp similarity index 100% rename from tket/src/Mapping/LexicographicalComparison.hpp rename to tket/src/Mapping/include/LexicographicalComparison.hpp diff --git a/tket/src/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/MappingFrontier.hpp similarity index 100% rename from tket/src/Mapping/MappingFrontier.hpp rename to tket/src/Mapping/include/MappingFrontier.hpp diff --git a/tket/src/Mapping/MappingManager.hpp b/tket/src/Mapping/include/MappingManager.hpp similarity index 100% rename from tket/src/Mapping/MappingManager.hpp rename to tket/src/Mapping/include/MappingManager.hpp diff --git a/tket/src/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/MultiGateReorder.hpp similarity index 100% rename from tket/src/Mapping/MultiGateReorder.hpp rename to tket/src/Mapping/include/MultiGateReorder.hpp diff --git a/tket/src/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/RoutingMethod.hpp similarity index 100% rename from tket/src/Mapping/RoutingMethod.hpp rename to tket/src/Mapping/include/RoutingMethod.hpp diff --git a/tket/src/Mapping/RoutingMethodCircuit.hpp b/tket/src/Mapping/include/RoutingMethodCircuit.hpp similarity index 100% rename from tket/src/Mapping/RoutingMethodCircuit.hpp rename to tket/src/Mapping/include/RoutingMethodCircuit.hpp diff --git a/tket/src/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/RoutingMethodJson.hpp similarity index 100% rename from tket/src/Mapping/RoutingMethodJson.hpp rename to tket/src/Mapping/include/RoutingMethodJson.hpp diff --git a/tket/src/Mapping/Verification.hpp b/tket/src/Mapping/include/Verification.hpp similarity index 100% rename from tket/src/Mapping/Verification.hpp rename to tket/src/Mapping/include/Verification.hpp diff --git a/tket/src/Placement/CMakeLists.txt b/tket/src/Placement/CMakeLists.txt index 708432b8f7..f1bdd7f0c5 100644 --- a/tket/src/Placement/CMakeLists.txt +++ b/tket/src/Placement/CMakeLists.txt @@ -14,20 +14,15 @@ project(tket-${COMP}) -if (NOT ${COMP} STREQUAL "Routing") +if (NOT ${COMP} STREQUAL "Placement") message(FATAL_ERROR "Unexpected component name.") endif() add_library(tket-${COMP} Qubit_Placement.cpp - Swap_Analysis.cpp - Board_Analysis.cpp - Routing.cpp - Slice_Manipulation.cpp subgraph_mapping.cpp Placement.cpp - PlacementGraphClasses.cpp - Verification.cpp) + PlacementGraphClasses.cpp) list(APPEND DEPS_${COMP} Architecture diff --git a/tket/src/Placement/Placement.hpp 
b/tket/src/Placement/include/Placement.hpp similarity index 100% rename from tket/src/Placement/Placement.hpp rename to tket/src/Placement/include/Placement.hpp From 548e52e5fddcbe81006a6ab031be77b0ec845d22 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 1 Feb 2022 16:53:03 +0000 Subject: [PATCH 020/146] compilation refactor commit --- pytket/setup.py | 3 +- recipes/tket/conanfile.py | 4 +- tket/src/ArchAwareSynth/CMakeLists.txt | 4 +- .../include/Architecture/Architecture.hpp | 2 +- tket/src/CMakeLists.txt | 3 +- tket/src/Characterisation/CMakeLists.txt | 2 + tket/src/Circuit/macro_circ_info.cpp | 4 + tket/src/Converters/CMakeLists.txt | 2 + tket/src/Diagonalisation/CMakeLists.txt | 2 + .../Graphs}/BruteForceColouring.hpp | 0 .../Graphs}/ColouringPriority.hpp | 0 tket/src/Mapping/CMakeLists.txt | 8 +- tket/src/Mapping/MappingManager.cpp | 3 +- .../include/{ => Mapping}/LexiRoute.hpp | 0 .../LexicographicalComparison.hpp | 0 .../include/{ => Mapping}/MappingFrontier.hpp | 0 .../include/{ => Mapping}/MappingManager.hpp | 0 .../{ => Mapping}/MultiGateReorder.hpp | 0 .../include/{ => Mapping}/RoutingMethod.hpp | 0 .../{ => Mapping}/RoutingMethodCircuit.hpp | 0 .../{ => Mapping}/RoutingMethodJson.hpp | 0 .../include/{ => Mapping}/Verification.hpp | 0 tket/src/MeasurementSetup/CMakeLists.txt | 2 + tket/src/PauliGraph/CMakeLists.txt | 2 + tket/src/Placement/CMakeLists.txt | 2 + .../include/{ => Placement}/Placement.hpp | 3 + tket/src/Predicates/CMakeLists.txt | 4 +- tket/src/Program/CMakeLists.txt | 2 + tket/src/Simulation/CMakeLists.txt | 2 + tket/src/TokenSwapping/CMakeLists.txt | 82 +++++++++++++++++++ .../TSAUtils/DistanceFunctions.hpp | 2 +- .../TSAUtils/GeneralFunctions.hpp | 2 +- .../TokenSwapping/TSAUtils/SwapFunctions.hpp | 2 +- .../TSAUtils/VertexMappingFunctions.cpp | 2 +- .../TableLookup/SwapListSegmentOptimiser.cpp | 2 +- .../TableLookup/SwapListTableOptimiser.hpp | 2 +- .../TableLookup/VertexMapResizing.hpp | 2 +- .../TokenSwapping}/ArchitectureMapping.hpp | 0 .../TokenSwapping}/BestFullTsa.hpp | 0 .../TokenSwapping}/CyclesCandidateManager.hpp | 0 .../TokenSwapping}/CyclesGrowthManager.hpp | 0 .../TokenSwapping}/CyclesPartialTsa.hpp | 0 .../CyclicShiftCostEstimate.hpp | 0 .../DistancesFromArchitecture.hpp | 0 .../TokenSwapping}/DistancesInterface.hpp | 0 .../TokenSwapping}/DynamicTokenTracker.hpp | 0 .../TokenSwapping}/HybridTsa00.hpp | 0 .../NeighboursFromArchitecture.hpp | 0 .../TokenSwapping}/NeighboursInterface.hpp | 0 .../TokenSwapping}/PartialTsaInterface.hpp | 0 .../TokenSwapping}/PathFinderInterface.hpp | 0 .../{ => include/TokenSwapping}/RNG.hpp | 0 .../TokenSwapping}/RiverFlowPathFinder.hpp | 0 .../TokenSwapping}/SwapListOptimiser.hpp | 0 .../TokenSwapping}/TrivialTSA.hpp | 0 .../TokenSwapping}/VectorListHybrid.hpp | 0 .../VectorListHybridSkeleton.hpp | 0 .../TokenSwapping}/main_entry_functions.hpp | 0 tket/src/Transformations/CMakeLists.txt | 2 + .../{ => include/Utils}/AssertMessage.hpp | 0 60 files changed, 134 insertions(+), 18 deletions(-) rename tket/src/Graphs/{ => include/Graphs}/BruteForceColouring.hpp (100%) rename tket/src/Graphs/{ => include/Graphs}/ColouringPriority.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/LexiRoute.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/LexicographicalComparison.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/MappingFrontier.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/MappingManager.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/MultiGateReorder.hpp (100%) rename 
tket/src/Mapping/include/{ => Mapping}/RoutingMethod.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/RoutingMethodCircuit.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/RoutingMethodJson.hpp (100%) rename tket/src/Mapping/include/{ => Mapping}/Verification.hpp (100%) rename tket/src/Placement/include/{ => Placement}/Placement.hpp (99%) create mode 100644 tket/src/TokenSwapping/CMakeLists.txt rename tket/src/TokenSwapping/{ => include/TokenSwapping}/ArchitectureMapping.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/BestFullTsa.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/CyclesCandidateManager.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/CyclesGrowthManager.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/CyclesPartialTsa.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/CyclicShiftCostEstimate.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/DistancesFromArchitecture.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/DistancesInterface.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/DynamicTokenTracker.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/HybridTsa00.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/NeighboursFromArchitecture.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/NeighboursInterface.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/PartialTsaInterface.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/PathFinderInterface.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/RNG.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/RiverFlowPathFinder.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/SwapListOptimiser.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/TrivialTSA.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/VectorListHybrid.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/VectorListHybridSkeleton.hpp (100%) rename tket/src/TokenSwapping/{ => include/TokenSwapping}/main_entry_functions.hpp (100%) rename tket/src/Utils/{ => include/Utils}/AssertMessage.hpp (100%) diff --git a/pytket/setup.py b/pytket/setup.py index 40bcdbfa4d..d707068041 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -129,7 +129,8 @@ def run(self): "tket-MeasurementSetup", "tket-Transformations", "tket-ArchAwareSynth", - "tket-Predicates", + "tket-Predicates" + "tket-TokenSwapping", ] for tket_lib in tket_libs: shutil.copy(os.path.join(directory, "lib", libfile(tket_lib)), extdir) diff --git a/recipes/tket/conanfile.py b/recipes/tket/conanfile.py index 838ee7d3e4..bf603371aa 100644 --- a/recipes/tket/conanfile.py +++ b/recipes/tket/conanfile.py @@ -63,7 +63,9 @@ class TketConan(ConanFile): "Program", "Characterisation", "Converters", - "Routing", + "TokenSwapping", + "Mapping", + "Placement", "MeasurementSetup", "Transformations", "ArchAwareSynth", diff --git a/tket/src/ArchAwareSynth/CMakeLists.txt b/tket/src/ArchAwareSynth/CMakeLists.txt index 6f2747fea6..bd80756a78 100644 --- a/tket/src/ArchAwareSynth/CMakeLists.txt +++ b/tket/src/ArchAwareSynth/CMakeLists.txt @@ -33,8 +33,10 @@ list(APPEND DEPS_${COMP} Graphs Ops OpType + Mapping PauliGraph - Routing + Placement + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp 
b/tket/src/Architecture/include/Architecture/Architecture.hpp index b7c3d975d0..3e6cd0b63c 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -21,8 +21,8 @@ #include #include -#include "Graphs/CompleteGraph.hpp" #include "Graphs/DirectedGraph.hpp" +#include "Graphs/CompleteGraph.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/Json.hpp" diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index c13b1476f0..d1e48730ff 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -78,7 +78,8 @@ list(APPEND TKET_COMPS MeasurementSetup Transformations ArchAwareSynth - Predicates) + Predicates + TokenSwapping) foreach(COMP ${TKET_COMPS}) set(TKET_${COMP}_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${COMP}/include) diff --git a/tket/src/Characterisation/CMakeLists.txt b/tket/src/Characterisation/CMakeLists.txt index 8821a71d39..3f2ec3d5f0 100644 --- a/tket/src/Characterisation/CMakeLists.txt +++ b/tket/src/Characterisation/CMakeLists.txt @@ -28,9 +28,11 @@ list(APPEND DEPS_${COMP} Circuit Gate Graphs + Mapping Ops OpType PauliGraph + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Circuit/macro_circ_info.cpp b/tket/src/Circuit/macro_circ_info.cpp index 520d36a412..e5b78051dc 100644 --- a/tket/src/Circuit/macro_circ_info.cpp +++ b/tket/src/Circuit/macro_circ_info.cpp @@ -184,10 +184,14 @@ Circuit Circuit::subcircuit(const Subcircuit& sc) const { // returns qubit path via vertices & inhabited port in vertices // used to construct a routing grid QPathDetailed Circuit::unit_path(const UnitID& unit) const { + Vertex current_v = get_in(unit); + + QPathDetailed path = {{current_v, 0}}; Edge betweenEdge = get_nth_out_edge(current_v, 0); current_v = target(betweenEdge); + while (detect_final_Op(current_v) == false) { if (n_out_edges(current_v) == 0) { throw CircuitInvalidity("A path ends before reaching an output vertex."); diff --git a/tket/src/Converters/CMakeLists.txt b/tket/src/Converters/CMakeLists.txt index 748579e9da..24f61ae4a5 100644 --- a/tket/src/Converters/CMakeLists.txt +++ b/tket/src/Converters/CMakeLists.txt @@ -32,9 +32,11 @@ list(APPEND DEPS_${COMP} Clifford Diagonalisation Gate + Mapping Ops OpType PauliGraph + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Diagonalisation/CMakeLists.txt b/tket/src/Diagonalisation/CMakeLists.txt index 68f58cf72b..f2cae85bd2 100644 --- a/tket/src/Diagonalisation/CMakeLists.txt +++ b/tket/src/Diagonalisation/CMakeLists.txt @@ -28,9 +28,11 @@ list(APPEND DEPS_${COMP} Clifford Gate Graphs + Mapping Ops OpType PauliGraph + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Graphs/BruteForceColouring.hpp b/tket/src/Graphs/include/Graphs/BruteForceColouring.hpp similarity index 100% rename from tket/src/Graphs/BruteForceColouring.hpp rename to tket/src/Graphs/include/Graphs/BruteForceColouring.hpp diff --git a/tket/src/Graphs/ColouringPriority.hpp b/tket/src/Graphs/include/Graphs/ColouringPriority.hpp similarity index 100% rename from tket/src/Graphs/ColouringPriority.hpp rename to tket/src/Graphs/include/Graphs/ColouringPriority.hpp diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index 51832aee29..534adc2ea8 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -29,12 +29,14 @@ add_library(tket-${COMP} Verification.cpp) list(APPEND DEPS_${COMP} + Architecture + Circuit Gate + Graphs Ops 
OpType - Utils - Circuit - Architecture) + TokenSwapping + Utils) foreach(DEP ${DEPS_${COMP}}) target_include_directories( diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 3250345a1c..e5319168d2 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -1,7 +1,6 @@ #include "Mapping/MappingManager.hpp" - -#include "OpType/OpTypeFunctions.hpp" #include "TokenSwapping/main_entry_functions.hpp" +#include "OpType/OpTypeFunctions.hpp" namespace tket { diff --git a/tket/src/Mapping/include/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp similarity index 100% rename from tket/src/Mapping/include/LexiRoute.hpp rename to tket/src/Mapping/include/Mapping/LexiRoute.hpp diff --git a/tket/src/Mapping/include/LexicographicalComparison.hpp b/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp similarity index 100% rename from tket/src/Mapping/include/LexicographicalComparison.hpp rename to tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp diff --git a/tket/src/Mapping/include/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp similarity index 100% rename from tket/src/Mapping/include/MappingFrontier.hpp rename to tket/src/Mapping/include/Mapping/MappingFrontier.hpp diff --git a/tket/src/Mapping/include/MappingManager.hpp b/tket/src/Mapping/include/Mapping/MappingManager.hpp similarity index 100% rename from tket/src/Mapping/include/MappingManager.hpp rename to tket/src/Mapping/include/Mapping/MappingManager.hpp diff --git a/tket/src/Mapping/include/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp similarity index 100% rename from tket/src/Mapping/include/MultiGateReorder.hpp rename to tket/src/Mapping/include/Mapping/MultiGateReorder.hpp diff --git a/tket/src/Mapping/include/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp similarity index 100% rename from tket/src/Mapping/include/RoutingMethod.hpp rename to tket/src/Mapping/include/Mapping/RoutingMethod.hpp diff --git a/tket/src/Mapping/include/RoutingMethodCircuit.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp similarity index 100% rename from tket/src/Mapping/include/RoutingMethodCircuit.hpp rename to tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp diff --git a/tket/src/Mapping/include/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp similarity index 100% rename from tket/src/Mapping/include/RoutingMethodJson.hpp rename to tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp diff --git a/tket/src/Mapping/include/Verification.hpp b/tket/src/Mapping/include/Mapping/Verification.hpp similarity index 100% rename from tket/src/Mapping/include/Verification.hpp rename to tket/src/Mapping/include/Mapping/Verification.hpp diff --git a/tket/src/MeasurementSetup/CMakeLists.txt b/tket/src/MeasurementSetup/CMakeLists.txt index 7c521ee31b..206ce69a41 100644 --- a/tket/src/MeasurementSetup/CMakeLists.txt +++ b/tket/src/MeasurementSetup/CMakeLists.txt @@ -28,9 +28,11 @@ list(APPEND DEPS_${COMP} Converters Diagonalisation Gate + Mapping Ops OpType PauliGraph + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/PauliGraph/CMakeLists.txt b/tket/src/PauliGraph/CMakeLists.txt index 061c9cc8f5..b10423478c 100644 --- a/tket/src/PauliGraph/CMakeLists.txt +++ b/tket/src/PauliGraph/CMakeLists.txt @@ -25,8 +25,10 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Clifford Gate + Mapping Ops OpType + 
TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Placement/CMakeLists.txt b/tket/src/Placement/CMakeLists.txt index f1bdd7f0c5..3cc817881a 100644 --- a/tket/src/Placement/CMakeLists.txt +++ b/tket/src/Placement/CMakeLists.txt @@ -30,8 +30,10 @@ list(APPEND DEPS_${COMP} Circuit Gate Graphs + Mapping Ops OpType + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Placement/include/Placement.hpp b/tket/src/Placement/include/Placement/Placement.hpp similarity index 99% rename from tket/src/Placement/include/Placement.hpp rename to tket/src/Placement/include/Placement/Placement.hpp index 0bfdaf8f0a..a3241e34be 100644 --- a/tket/src/Placement/include/Placement.hpp +++ b/tket/src/Placement/include/Placement/Placement.hpp @@ -277,6 +277,9 @@ class LinePlacement : public Placement { const Circuit& circ_) const override; }; + + + class GraphPlacement : public Placement { public: explicit GraphPlacement(const Architecture& _arc) { diff --git a/tket/src/Predicates/CMakeLists.txt b/tket/src/Predicates/CMakeLists.txt index acea56bef9..3e871ffd04 100644 --- a/tket/src/Predicates/CMakeLists.txt +++ b/tket/src/Predicates/CMakeLists.txt @@ -34,10 +34,12 @@ list(APPEND DEPS_${COMP} Converters Gate Graphs + Mapping Ops OpType PauliGraph - Routing + Placement + TokenSwapping Transformations Utils) diff --git a/tket/src/Program/CMakeLists.txt b/tket/src/Program/CMakeLists.txt index 805bdfc6ad..95fd56f913 100644 --- a/tket/src/Program/CMakeLists.txt +++ b/tket/src/Program/CMakeLists.txt @@ -28,8 +28,10 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Circuit Gate + Mapping Ops OpType + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Simulation/CMakeLists.txt b/tket/src/Simulation/CMakeLists.txt index 4f9d122303..78e5e21baa 100644 --- a/tket/src/Simulation/CMakeLists.txt +++ b/tket/src/Simulation/CMakeLists.txt @@ -29,8 +29,10 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Circuit Gate + Mapping Ops OpType + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt new file mode 100644 index 0000000000..05a9bc8364 --- /dev/null +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -0,0 +1,82 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project(tket-${COMP}) + +if (NOT ${COMP} STREQUAL "TokenSwapping") + message(FATAL_ERROR "Unexpected component name.") +endif() + +add_library(tket-${COMP} + ArchitectureMapping.cpp + BestFullTsa.cpp + CyclesCandidateManager.cpp + CyclesGrowthManager.cpp + CyclesPartialTsa.cpp + CyclicShiftCostEstimate.cpp + DistancesFromArchitecture.cpp + DistancesInterface.cpp + DynamicTokenTracker.cpp + HybridTsa00.cpp + main_entry_functions.cpp + NeighboursFromArchitecture.cpp + NeighboursInterface.cpp + PartialTsaInterface.cpp + PathFinderInterface.cpp + RiverFlowPathFinder.cpp + RNG.cpp + SwapListOptimiser.cpp + TrivialTSA.cpp + VectorListHybridSkeleton.cpp + TSAUtils/DebugFunctions.cpp + TSAUtils/DistanceFunctions.cpp + TSAUtils/GeneralFunctions.cpp + TSAUtils/SwapFunctions.cpp + TSAUtils/VertexMappingFunctions.cpp + TSAUtils/VertexSwapResult.cpp + TableLookup/CanonicalRelabelling.cpp + TableLookup/ExactMappingLookup.cpp + TableLookup/FilteredSwapSequences.cpp + TableLookup/PartialMappingLookup.cpp + TableLookup/SwapConversion.cpp + TableLookup/SwapListSegmentOptimiser.cpp + TableLookup/SwapListTableOptimiser.cpp + TableLookup/SwapSequenceTable.cpp + TableLookup/VertexMapResizing.cpp + ) + +list(APPEND DEPS_${COMP} + Architecture + Circuit + Gate + Graphs + Mapping + Ops + OpType + Utils) + +foreach(DEP ${DEPS_${COMP}}) + target_include_directories( + tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) + target_link_libraries( + tket-${COMP} PRIVATE tket-${DEP}) +endforeach() + +target_include_directories(tket-${COMP} + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${TKET_${COMP}_INCLUDE_DIR} + ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) + +target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS}) diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp index cd077dee48..43612bfeb5 100644 --- a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp @@ -18,7 +18,7 @@ #include #include -#include "../DistancesInterface.hpp" +#include "TokenSwapping/DistancesInterface.hpp" #include "VertexMappingFunctions.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp index bcf9162590..dd3ab9dff4 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp @@ -23,7 +23,7 @@ #include #include -#include "../RNG.hpp" +#include "TokenSwapping/RNG.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp index e259c051ec..b8fe8063d5 100644 --- a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp @@ -18,7 +18,7 @@ #include #include -#include "../VectorListHybrid.hpp" +#include "TokenSwapping/VectorListHybrid.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index 9ac8ba8a09..e2cff6931d 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -17,7 +17,7 @@ #include #include -#include "../../Utils/Assert.hpp" +#include "Utils/Assert.hpp" #include "VertexSwapResult.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp 
b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index 0c07183d08..d5a050fc70 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -18,7 +18,7 @@ #include #include -#include "../../Utils/Assert.hpp" +#include "Utils/Assert.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp index 3d97025bac..b383b77167 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp @@ -17,7 +17,7 @@ #include -#include "../SwapListOptimiser.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" #include "PartialMappingLookup.hpp" #include "SwapListSegmentOptimiser.hpp" #include "VertexMapResizing.hpp" diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp index aafeb49f20..b42b34a38d 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp @@ -19,7 +19,7 @@ #include #include -#include "../NeighboursInterface.hpp" +#include "TokenSwapping/NeighboursInterface.hpp" #include "../TSAUtils/VertexMappingFunctions.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp similarity index 100% rename from tket/src/TokenSwapping/ArchitectureMapping.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp diff --git a/tket/src/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp similarity index 100% rename from tket/src/TokenSwapping/BestFullTsa.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp similarity index 100% rename from tket/src/TokenSwapping/CyclesCandidateManager.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp similarity index 100% rename from tket/src/TokenSwapping/CyclesGrowthManager.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp similarity index 100% rename from tket/src/TokenSwapping/CyclesPartialTsa.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp similarity index 100% rename from tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp similarity index 100% rename from tket/src/TokenSwapping/DistancesFromArchitecture.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp diff --git a/tket/src/TokenSwapping/DistancesInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp 
similarity index 100% rename from tket/src/TokenSwapping/DistancesInterface.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp similarity index 100% rename from tket/src/TokenSwapping/DynamicTokenTracker.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp diff --git a/tket/src/TokenSwapping/HybridTsa00.hpp b/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp similarity index 100% rename from tket/src/TokenSwapping/HybridTsa00.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp similarity index 100% rename from tket/src/TokenSwapping/NeighboursFromArchitecture.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp diff --git a/tket/src/TokenSwapping/NeighboursInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp similarity index 100% rename from tket/src/TokenSwapping/NeighboursInterface.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp diff --git a/tket/src/TokenSwapping/PartialTsaInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp similarity index 100% rename from tket/src/TokenSwapping/PartialTsaInterface.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp diff --git a/tket/src/TokenSwapping/PathFinderInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp similarity index 100% rename from tket/src/TokenSwapping/PathFinderInterface.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp diff --git a/tket/src/TokenSwapping/RNG.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp similarity index 100% rename from tket/src/TokenSwapping/RNG.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp similarity index 100% rename from tket/src/TokenSwapping/RiverFlowPathFinder.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp diff --git a/tket/src/TokenSwapping/SwapListOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp similarity index 100% rename from tket/src/TokenSwapping/SwapListOptimiser.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp diff --git a/tket/src/TokenSwapping/TrivialTSA.hpp b/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp similarity index 100% rename from tket/src/TokenSwapping/TrivialTSA.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp diff --git a/tket/src/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp similarity index 100% rename from tket/src/TokenSwapping/VectorListHybrid.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp similarity index 100% rename from tket/src/TokenSwapping/VectorListHybridSkeleton.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp diff 
--git a/tket/src/TokenSwapping/main_entry_functions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp similarity index 100% rename from tket/src/TokenSwapping/main_entry_functions.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp diff --git a/tket/src/Transformations/CMakeLists.txt b/tket/src/Transformations/CMakeLists.txt index ada33d2458..f6955c346e 100644 --- a/tket/src/Transformations/CMakeLists.txt +++ b/tket/src/Transformations/CMakeLists.txt @@ -42,9 +42,11 @@ list(APPEND DEPS_${COMP} Converters Gate Graphs + Mapping Ops OpType PauliGraph + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Utils/AssertMessage.hpp b/tket/src/Utils/include/Utils/AssertMessage.hpp similarity index 100% rename from tket/src/Utils/AssertMessage.hpp rename to tket/src/Utils/include/Utils/AssertMessage.hpp From edeaf706898468432da6844f62ee97134a63700e Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 1 Feb 2022 17:03:53 +0000 Subject: [PATCH 021/146] Reorder base cmake --- tket/src/CMakeLists.txt | 4 ++-- tket/src/TokenSwapping/CMakeLists.txt | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index d1e48730ff..083f42acae 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -73,13 +73,13 @@ list(APPEND TKET_COMPS Program Characterisation Converters + TokenSwapping Mapping Placement MeasurementSetup Transformations ArchAwareSynth - Predicates - TokenSwapping) + Predicates) foreach(COMP ${TKET_COMPS}) set(TKET_${COMP}_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${COMP}/include) diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt index 05a9bc8364..3d135284f2 100644 --- a/tket/src/TokenSwapping/CMakeLists.txt +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -61,7 +61,6 @@ list(APPEND DEPS_${COMP} Circuit Gate Graphs - Mapping Ops OpType Utils) From 8b5f108e3d60b6f874f03662ad6cb495961a64fa Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 1 Feb 2022 17:46:50 +0000 Subject: [PATCH 022/146] add assertmessage.cpp to compiilation --- tket/src/Utils/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index d81eee9d36..50010e9ee8 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -19,6 +19,7 @@ if (NOT ${COMP} STREQUAL "Utils") endif() add_library(tket-Utils + AssertMessage.cpp TketLog.cpp UnitID.cpp HelperFunctions.cpp From c1f3e537db10ca0d0d13f62edca77ad9d8753836 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 2 Feb 2022 11:43:21 +0000 Subject: [PATCH 023/146] Rework TokenSwapping includes --- .../include/Architecture/Architecture.hpp | 2 +- tket/src/Circuit/macro_circ_info.cpp | 2 - tket/src/Mapping/MappingManager.cpp | 3 +- tket/src/TokenSwapping/BestFullTsa.cpp | 8 +- .../TokenSwapping/CyclesCandidateManager.cpp | 2 +- .../src/TokenSwapping/CyclesGrowthManager.cpp | 2 +- tket/src/TokenSwapping/HybridTsa00.cpp | 2 +- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 2 +- tket/src/TokenSwapping/SwapListOptimiser.cpp | 2 +- .../TokenSwapping/TSAUtils/DebugFunctions.cpp | 2 +- .../TSAUtils/DistanceFunctions.cpp | 2 +- .../TSAUtils/GeneralFunctions.cpp | 2 +- .../TokenSwapping/TSAUtils/SwapFunctions.cpp | 2 +- .../TSAUtils/VertexMappingFunctions.cpp | 4 +- .../TSAUtils/VertexSwapResult.cpp | 2 +- .../TableLookup/CanonicalRelabelling.cpp | 2 +- .../TableLookup/ExactMappingLookup.cpp | 8 +- 
.../TableLookup/FilteredSwapSequences.cpp | 6 +- .../TableLookup/PartialMappingLookup.cpp | 2 +- .../TableLookup/SwapConversion.cpp | 2 +- .../TableLookup/SwapListSegmentOptimiser.cpp | 2 +- .../TableLookup/SwapListTableOptimiser.cpp | 4 +- .../TableLookup/SwapSequenceTable.cpp | 2 +- .../TableLookup/VertexMapResizing.cpp | 2 +- .../TableLookup/VertexMapResizing.hpp | 2 +- tket/src/TokenSwapping/TrivialTSA.cpp | 8 +- .../TokenSwapping/ArchitectureMapping.hpp | 2 +- .../include/TokenSwapping/BestFullTsa.hpp | 2 +- .../TokenSwapping}/CanonicalRelabelling.hpp | 2 +- .../TokenSwapping/CyclesGrowthManager.hpp | 6 +- .../TokenSwapping}/DebugFunctions.hpp | 0 .../TokenSwapping}/DistanceFunctions.hpp | 0 .../DistancesFromArchitecture.hpp | 2 +- .../TokenSwapping/DynamicTokenTracker.hpp | 2 +- .../TokenSwapping}/ExactMappingLookup.hpp | 0 .../TokenSwapping}/FilteredSwapSequences.hpp | 0 .../TokenSwapping}/GeneralFunctions.hpp | 0 .../TokenSwapping}/PartialMappingLookup.hpp | 0 .../TokenSwapping/PartialTsaInterface.hpp | 2 +- .../TokenSwapping}/SwapConversion.hpp | 2 +- .../TokenSwapping}/SwapFunctions.hpp | 0 .../SwapListSegmentOptimiser.hpp | 2 +- .../TokenSwapping}/SwapListTableOptimiser.hpp | 2 +- .../TokenSwapping}/SwapSequenceTable.hpp | 0 .../TokenSwapping/VertexMapResizing.hpp | 120 ++++++++++++++++++ .../TokenSwapping}/VertexMappingFunctions.hpp | 0 .../TokenSwapping}/VertexSwapResult.hpp | 0 .../TokenSwapping/main_entry_functions.cpp | 2 +- .../TableLookup/NeighboursFromEdges.hpp | 2 +- .../SwapSequenceReductionTester.cpp | 8 +- .../SwapSequenceReductionTester.hpp | 2 +- .../TableLookup/test_CanonicalRelabelling.cpp | 2 +- .../TableLookup/test_ExactMappingLookup.cpp | 8 +- .../test_FilteredSwapSequences.cpp | 2 +- .../TableLookup/test_SwapSequenceTable.cpp | 4 +- .../TokenSwapping/TestUtils/BestTsaTester.cpp | 4 +- .../TestUtils/DecodedProblemData.cpp | 4 +- .../TestUtils/DecodedProblemData.hpp | 2 +- .../TestUtils/FullTsaTesting.cpp | 6 +- .../TestUtils/PartialTsaTesting.cpp | 4 +- .../TestUtils/ProblemGeneration.cpp | 2 +- .../TestUtils/ProblemGeneration.hpp | 2 +- tket/tests/TokenSwapping/test_SwapList.cpp | 2 +- .../TokenSwapping/test_SwapListOptimiser.cpp | 2 +- .../TokenSwapping/test_VariousPartialTsa.cpp | 2 +- 65 files changed, 201 insertions(+), 82 deletions(-) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/CanonicalRelabelling.hpp (98%) rename tket/src/TokenSwapping/{TSAUtils => include/TokenSwapping}/DebugFunctions.hpp (100%) rename tket/src/TokenSwapping/{TSAUtils => include/TokenSwapping}/DistanceFunctions.hpp (100%) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/ExactMappingLookup.hpp (100%) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/FilteredSwapSequences.hpp (100%) rename tket/src/TokenSwapping/{TSAUtils => include/TokenSwapping}/GeneralFunctions.hpp (100%) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/PartialMappingLookup.hpp (100%) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/SwapConversion.hpp (98%) rename tket/src/TokenSwapping/{TSAUtils => include/TokenSwapping}/SwapFunctions.hpp (100%) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/SwapListSegmentOptimiser.hpp (98%) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/SwapListTableOptimiser.hpp (100%) rename tket/src/TokenSwapping/{TableLookup => include/TokenSwapping}/SwapSequenceTable.hpp (100%) create mode 100644 
tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp rename tket/src/TokenSwapping/{TSAUtils => include/TokenSwapping}/VertexMappingFunctions.hpp (100%) rename tket/src/TokenSwapping/{TSAUtils => include/TokenSwapping}/VertexSwapResult.hpp (100%) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index 3e6cd0b63c..b7c3d975d0 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -21,8 +21,8 @@ #include #include -#include "Graphs/DirectedGraph.hpp" #include "Graphs/CompleteGraph.hpp" +#include "Graphs/DirectedGraph.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/Json.hpp" diff --git a/tket/src/Circuit/macro_circ_info.cpp b/tket/src/Circuit/macro_circ_info.cpp index e5b78051dc..e453e79cca 100644 --- a/tket/src/Circuit/macro_circ_info.cpp +++ b/tket/src/Circuit/macro_circ_info.cpp @@ -184,10 +184,8 @@ Circuit Circuit::subcircuit(const Subcircuit& sc) const { // returns qubit path via vertices & inhabited port in vertices // used to construct a routing grid QPathDetailed Circuit::unit_path(const UnitID& unit) const { - Vertex current_v = get_in(unit); - QPathDetailed path = {{current_v, 0}}; Edge betweenEdge = get_nth_out_edge(current_v, 0); current_v = target(betweenEdge); diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index e5319168d2..3250345a1c 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -1,6 +1,7 @@ #include "Mapping/MappingManager.hpp" -#include "TokenSwapping/main_entry_functions.hpp" + #include "OpType/OpTypeFunctions.hpp" +#include "TokenSwapping/main_entry_functions.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp index 9aa9dd3052..30c30bbaaa 100644 --- a/tket/src/TokenSwapping/BestFullTsa.cpp +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -14,10 +14,10 @@ #include "BestFullTsa.hpp" -#include "DistancesFromArchitecture.hpp" -#include "NeighboursFromArchitecture.hpp" -#include "RiverFlowPathFinder.hpp" -#include "TableLookup/VertexMapResizing.hpp" +#include "TokenSwapping/DistancesFromArchitecture.hpp" +#include "TokenSwapping/NeighboursFromArchitecture.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/VertexMapResizing.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp index 338d07c17b..6ce7852929 100644 --- a/tket/src/TokenSwapping/CyclesCandidateManager.cpp +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -18,7 +18,7 @@ #include #include -#include "TSAUtils/VertexSwapResult.hpp" +#include "VertexSwapResult.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp index bfb0f94cd8..b27dd92798 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.cpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -16,7 +16,7 @@ #include -#include "TSAUtils/DistanceFunctions.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/HybridTsa00.cpp b/tket/src/TokenSwapping/HybridTsa00.cpp index f897c3e047..0ab272412e 100644 --- a/tket/src/TokenSwapping/HybridTsa00.cpp +++ b/tket/src/TokenSwapping/HybridTsa00.cpp @@ -14,7 +14,7 @@ #include 
"HybridTsa00.hpp" -#include "TSAUtils/DistanceFunctions.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" #include "Utils/Assert.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index af470ef7af..481f808355 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -17,7 +17,7 @@ #include #include -#include "TSAUtils/SwapFunctions.hpp" +#include "SwapFunctions.hpp" #include "Utils/Assert.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp index 28c4e29fcb..42773437ce 100644 --- a/tket/src/TokenSwapping/SwapListOptimiser.cpp +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -14,8 +14,8 @@ #include "SwapListOptimiser.hpp" -#include "TSAUtils/VertexSwapResult.hpp" #include "Utils/Assert.hpp" +#include "VertexSwapResult.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp index 1f50673ec8..a16b9cca6b 100644 --- a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "DebugFunctions.hpp" +#include "TokenSwapping/DebugFunctions.hpp" #include diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp index 16f28fc4f0..7463e35b09 100644 --- a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "DistanceFunctions.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" #include #include diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp index 243fe46de7..b87bb1902c 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "GeneralFunctions.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" #include #include diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp index 7c2d9dbfe5..282b2efd36 100644 --- a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "SwapFunctions.hpp" +#include "TokenSwapping/SwapFunctions.hpp" #include #include diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index e2cff6931d..5d8b1c965c 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" #include #include +#include "TokenSwapping/VertexSwapResult.hpp" #include "Utils/Assert.hpp" -#include "VertexSwapResult.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp index 0a9414e6d2..51c1c65fcc 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "VertexSwapResult.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp index f4733fd6a0..e26a60b976 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "CanonicalRelabelling.hpp" +#include "TokenSwapping/CanonicalRelabelling.hpp" #include #include diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp index b451f9dba8..d6c5f96c07 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ExactMappingLookup.hpp" +#include "TokenSwapping/ExactMappingLookup.hpp" #include -#include "../TSAUtils/GeneralFunctions.hpp" -#include "FilteredSwapSequences.hpp" -#include "SwapConversion.hpp" +#include "TokenSwapping/FilteredSwapSequences.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/SwapConversion.hpp" #include "Utils/Assert.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp index 0e98536471..39fb0ccbdc 100644 --- a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "FilteredSwapSequences.hpp" +#include "TokenSwapping/FilteredSwapSequences.hpp" #include -#include "../TSAUtils/GeneralFunctions.hpp" -#include "SwapSequenceTable.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/SwapSequenceTable.hpp" #include "Utils/Assert.hpp" ; diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp index b4aa171cfc..891e80770a 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "PartialMappingLookup.hpp" +#include "TokenSwapping/PartialMappingLookup.hpp" #include diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp index 25c7ce3f09..0c8d8ad3ad 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "SwapConversion.hpp" +#include "TokenSwapping/SwapConversion.hpp" #include "Utils/Assert.hpp" diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index d5a050fc70..9fa6286e5a 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "SwapListSegmentOptimiser.hpp" +#include "TokenSwapping/SwapListSegmentOptimiser.hpp" #include #include diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp index c3cf69c41a..6478942473 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "SwapListTableOptimiser.hpp" +#include "TokenSwapping/SwapListTableOptimiser.hpp" #include #include #include -#include "../TSAUtils/DebugFunctions.hpp" +#include "TokenSwapping/DebugFunctions.hpp" #include "Utils/Assert.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp index ad1c412983..21a2597558 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "SwapSequenceTable.hpp" +#include "TokenSwapping/SwapSequenceTable.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp index a2b7367f8b..7060995b1b 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "VertexMapResizing.hpp" +#include "TokenSwapping/VertexMapResizing.hpp" #include #include diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp index b42b34a38d..24cd00adb2 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp @@ -19,8 +19,8 @@ #include #include +#include "../VertexMappingFunctions.hpp" #include "TokenSwapping/NeighboursInterface.hpp" -#include "../TSAUtils/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp index 372e4e8528..faf01a58a1 100644 --- a/tket/src/TokenSwapping/TrivialTSA.cpp +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -18,10 +18,10 @@ #include #include "CyclicShiftCostEstimate.hpp" -#include "TSAUtils/DebugFunctions.hpp" -#include "TSAUtils/DistanceFunctions.hpp" -#include "TSAUtils/GeneralFunctions.hpp" -#include "TSAUtils/VertexSwapResult.hpp" +#include "TokenSwapping/DebugFunctions.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" #include "Utils/Assert.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp index 37d15f7f67..dfe579bff4 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp @@ -15,7 +15,7 @@ #pragma once #include "Architecture/Architecture.hpp" -#include "TSAUtils/SwapFunctions.hpp" +#include "TokenSwapping/SwapFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp index ef1472d531..f476dbc0e8 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp @@ -18,7 +18,7 @@ #include "HybridTsa00.hpp" #include "RNG.hpp" #include "SwapListOptimiser.hpp" -#include "TableLookup/SwapListTableOptimiser.hpp" +#include "TokenSwapping/SwapListTableOptimiser.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp similarity index 98% rename from tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp index ede7f32ef3..d509b5bcaa 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp @@ -13,7 +13,7 @@ // limitations under the License. 
#pragma once -#include "../TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp index 87f2bac7cb..768c6b902e 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp @@ -14,9 +14,9 @@ #pragma once -#include "DistancesInterface.hpp" -#include "NeighboursInterface.hpp" -#include "TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/DistancesInterface.hpp" +#include "TokenSwapping/NeighboursInterface.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp similarity index 100% rename from tket/src/TokenSwapping/TSAUtils/DebugFunctions.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp similarity index 100% rename from tket/src/TokenSwapping/TSAUtils/DistanceFunctions.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp index f6d6ea12e1..718b81d1e9 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp @@ -16,7 +16,7 @@ #include "ArchitectureMapping.hpp" #include "DistancesInterface.hpp" -#include "TSAUtils/SwapFunctions.hpp" +#include "SwapFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp index a230e77869..bc6e7ab8e1 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp @@ -14,7 +14,7 @@ #pragma once -#include "TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp similarity index 100% rename from tket/src/TokenSwapping/TableLookup/ExactMappingLookup.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp b/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp similarity index 100% rename from tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp similarity index 100% rename from tket/src/TokenSwapping/TSAUtils/GeneralFunctions.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp 
b/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp similarity index 100% rename from tket/src/TokenSwapping/TableLookup/PartialMappingLookup.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp index 82e6cfc03a..2479a1907d 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp @@ -17,7 +17,7 @@ #include "DistancesInterface.hpp" #include "NeighboursInterface.hpp" #include "PathFinderInterface.hpp" -#include "TSAUtils/VertexMappingFunctions.hpp" +#include "VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp similarity index 98% rename from tket/src/TokenSwapping/TableLookup/SwapConversion.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp index b2d73b6d8e..58868ef21c 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp @@ -16,7 +16,7 @@ #include -#include "../TSAUtils/SwapFunctions.hpp" +#include "TokenSwapping/SwapFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp similarity index 100% rename from tket/src/TokenSwapping/TSAUtils/SwapFunctions.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp similarity index 98% rename from tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp index 006273807e..2d180e06ed 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp @@ -19,8 +19,8 @@ #include #include -#include "../TSAUtils/SwapFunctions.hpp" #include "PartialMappingLookup.hpp" +#include "TokenSwapping/SwapFunctions.hpp" #include "VertexMapResizing.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp similarity index 100% rename from tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp index b383b77167..ac815c2f7c 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp @@ -17,9 +17,9 @@ #include -#include "TokenSwapping/SwapListOptimiser.hpp" #include "PartialMappingLookup.hpp" #include "SwapListSegmentOptimiser.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" #include "VertexMapResizing.hpp" /// TODO: The swap table optimiser currently tries to optimise many segments; diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp similarity index 100% rename from tket/src/TokenSwapping/TableLookup/SwapSequenceTable.hpp rename to 
tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp new file mode 100644 index 0000000000..b8dd971001 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp @@ -0,0 +1,120 @@ +// Copyright 2019-2021 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include + +#include "TokenSwapping/NeighboursInterface.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** If a vertex mapping { u -> v } has too few vertices, try to add extra + * vertices, fixed by the new mapping, to get to the desired size. This may + * allow extra optimisations to be found in the table. E.g., imagine a vertex in + * a graph which is not moved by the mapping. Imagine that removing it makes the + * graph disconnected. If the desired mapping moves a token + * between different components, it is then impossible for any swap + * sequence within the subgraph to perform that mapping. + * However, adding the vertex back makes it possible. + * + * If instead there are too many vertices to look up in the table, it tries + * to remove vertices which are fixed by the mapping to get it down to size. + */ +class VertexMapResizing : public NeighboursInterface { + public: + /** Store a Neighbours object, to be used throughout when required to find + * all neighbours of a given vertex. The caller must ensure that the + * object remains valid. + * @param neighbours The object to calculate neighbours of a vertex. + */ + explicit VertexMapResizing(NeighboursInterface& neighbours); + + /** Gets the data by calling the NeighboursInterface object which was passed + * into the constructor. HOWEVER, it does internal caching, so it doesn't call it + * multiple times. + * @param vertex A vertex in the graph. + * @return A cached list of neighbours of that vertex, stored internally. + */ + virtual const std::vector& operator()(size_t vertex) override; + + /** The result of resizing a mapping by deleting fixed vertices if too big, + * or adding new vertices if too small. + */ + struct Result { + /** It is still a success if we have fewer vertices than the desired number + * (as this can still be looked up in the table). However, it's a failure if + * there are too many vertices (which then cannot be looked up). + */ + bool success; + + /** If successful, the edges of the subgraph containing only the vertices in + * the new mapping. */ + std::vector edges; + }; + + /** The mapping may be altered, even upon failure, so obviously the caller + * should make a copy if it needs to be preserved. Increase the map size as + * much as possible if too small (still a success even if it cannot reach the + * size). Decrease the size if too large (and not reaching the size is then a + * failure).
Newly added or removed vertices are all fixed, i.e. map[v]=v. + * @param mapping The mapping which will be altered and returned by reference. + * @param desired_size The size we wish to reach, or as close as possible if + * the mapping is currently too small. + */ + const Result& resize_mapping( + VertexMapping& mapping, unsigned desired_size = 6); + + private: + NeighboursInterface& m_neighbours; + Result m_result; + + // KEY: a vertex. VALUE: all its neighbours. + std::map> m_cached_neighbours; + std::set m_cached_full_edges; + + /** How many edges join the given vertex to other existing vertices? + * @param mapping The current vertex permutation which we may expand or + * contract. + * @param vertex A vertex which may or may not be already within the mapping. + * @return The total number of edges within the LARGER graph joining the + * vertex to other vertices within the mapping. + */ + size_t get_edge_count(const VertexMapping& mapping, size_t vertex); + + /** Try to add a single new fixed vertex to the mapping, i.e. a new v with + * map[v]=v. + * @param mapping The current vertex permutation which we wish to expand by + * one vertex. + */ + void add_vertex(VertexMapping& mapping); + + /** Try to remove a single vertex within the mapping, but only if it is fixed, + * i.e. map[v]==v. + * @param mapping The current vertex permutation which we wish to shrink by + * one vertex. + */ + void remove_vertex(VertexMapping& mapping); + + /** Within the m_result object, fill "edges" for the new mapping. */ + void fill_result_edges(const VertexMapping& mapping); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp similarity index 100% rename from tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp similarity index 100% rename from tket/src/TokenSwapping/TSAUtils/VertexSwapResult.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp index f315b14aeb..0489629e20 100644 --- a/tket/src/TokenSwapping/main_entry_functions.cpp +++ b/tket/src/TokenSwapping/main_entry_functions.cpp @@ -18,8 +18,8 @@ #include #include "BestFullTsa.hpp" -#include "TSAUtils/VertexMappingFunctions.hpp" #include "Utils/Assert.hpp" +#include "VertexMappingFunctions.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp index dff7cc4b15..504360f9f0 100644 --- a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp @@ -17,7 +17,7 @@ #include #include "TokenSwapping/NeighboursInterface.hpp" -#include "TokenSwapping/TSAUtils/SwapFunctions.hpp" +#include "TokenSwapping/SwapFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp index ad488009d3..74e4a27ade 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp +++ 
b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp @@ -17,10 +17,10 @@ #include #include "NeighboursFromEdges.hpp" -#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" -#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" -#include "TokenSwapping/TableLookup/SwapListSegmentOptimiser.hpp" -#include "TokenSwapping/TableLookup/VertexMapResizing.hpp" +#include "TokenSwapping/SwapListSegmentOptimiser.hpp" +#include "TokenSwapping/VertexMapResizing.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp index c02f6872a8..2adcf5f6d0 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp @@ -18,7 +18,7 @@ #include "../TestUtils/DecodedProblemData.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" -#include "TokenSwapping/TableLookup/SwapListTableOptimiser.hpp" +#include "TokenSwapping/SwapListTableOptimiser.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp index f992bdbddc..da96d908c1 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp @@ -18,8 +18,8 @@ #include #include "PermutationTestUtils.hpp" +#include "TokenSwapping/CanonicalRelabelling.hpp" #include "TokenSwapping/RNG.hpp" -#include "TokenSwapping/TableLookup/CanonicalRelabelling.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp index 82b91fdfda..9fbebf93cc 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp @@ -15,10 +15,10 @@ #include #include -#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" -#include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" -#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" -#include "TokenSwapping/TableLookup/ExactMappingLookup.hpp" +#include "TokenSwapping/DebugFunctions.hpp" +#include "TokenSwapping/ExactMappingLookup.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp index 00e76fe5b4..c169f84058 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp @@ -16,8 +16,8 @@ #include #include +#include "TokenSwapping/FilteredSwapSequences.hpp" #include "TokenSwapping/RNG.hpp" -#include "TokenSwapping/TableLookup/FilteredSwapSequences.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp index eed4159c5e..6bc7ed97fa 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp @@ -17,9 +17,9 @@ #include #include "PermutationTestUtils.hpp" +#include "TokenSwapping/SwapConversion.hpp" #include 
"TokenSwapping/SwapListOptimiser.hpp" -#include "TokenSwapping/TableLookup/SwapConversion.hpp" -#include "TokenSwapping/TableLookup/SwapSequenceTable.hpp" +#include "TokenSwapping/SwapSequenceTable.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp index a10bd9def7..ab7c31886b 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -17,8 +17,8 @@ #include #include "TokenSwapping/ArchitectureMapping.hpp" -#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" -#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp index 8f7fb94d6c..a489add6e7 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp @@ -16,8 +16,8 @@ #include -#include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" -#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp index abc5578ff8..7a10b40fa2 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp @@ -17,7 +17,7 @@ #include #include -#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp index 16e7a57425..7659964336 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -17,12 +17,12 @@ #include #include "TokenSwapping/ArchitectureMapping.hpp" +#include "TokenSwapping/DebugFunctions.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" -#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" -#include "TokenSwapping/TSAUtils/DistanceFunctions.hpp" -#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp index a0edae3ddb..722a6825a1 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -17,11 +17,11 @@ #include #include "TestStatsStructs.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" -#include "TokenSwapping/TSAUtils/DistanceFunctions.hpp" -#include "TokenSwapping/TSAUtils/VertexSwapResult.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp 
b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp index 7b7caaf558..f51cea6763 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -16,7 +16,7 @@ #include -#include "TokenSwapping/TSAUtils/GeneralFunctions.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp index 168a149704..a8043b5a0d 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -16,7 +16,7 @@ #include "Architecture/Architecture.hpp" #include "TokenSwapping/RNG.hpp" -#include "TokenSwapping/TSAUtils/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/test_SwapList.cpp b/tket/tests/TokenSwapping/test_SwapList.cpp index bd685d2c49..4769d470b5 100644 --- a/tket/tests/TokenSwapping/test_SwapList.cpp +++ b/tket/tests/TokenSwapping/test_SwapList.cpp @@ -16,7 +16,7 @@ #include #include -#include "TokenSwapping/TSAUtils/SwapFunctions.hpp" +#include "TokenSwapping/SwapFunctions.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp index 7c76a4687f..85242d0f36 100644 --- a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -17,9 +17,9 @@ #include #include +#include "TokenSwapping/DebugFunctions.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" -#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp index 503338a742..aba973ec54 100644 --- a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -18,9 +18,9 @@ #include "TestUtils/PartialTsaTesting.hpp" #include "TestUtils/ProblemGeneration.hpp" #include "TokenSwapping/CyclesPartialTsa.hpp" +#include "TokenSwapping/DebugFunctions.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" -#include "TokenSwapping/TSAUtils/DebugFunctions.hpp" #include "TokenSwapping/TrivialTSA.hpp" using std::vector; From e0d7dee7cb79fbeafbed9513cf16f670c5ae875c Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 2 Feb 2022 11:43:27 +0000 Subject: [PATCH 024/146] clang format --- tket/src/Placement/include/Placement/Placement.hpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/tket/src/Placement/include/Placement/Placement.hpp b/tket/src/Placement/include/Placement/Placement.hpp index a3241e34be..0bfdaf8f0a 100644 --- a/tket/src/Placement/include/Placement/Placement.hpp +++ b/tket/src/Placement/include/Placement/Placement.hpp @@ -277,9 +277,6 @@ class LinePlacement : public Placement { const Circuit& circ_) const override; }; - - - class GraphPlacement : public Placement { public: explicit GraphPlacement(const Architecture& _arc) { From 33436f1e9a176c2d4e5954ef30796f37dc1d1ea1 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 2 Feb 2022 13:47:50 +0000 Subject: [PATCH 025/146] update linking for pytket --- pytket/setup.py | 5 +++-- tket/src/Predicates/include/Predicates/Predicates.hpp | 1 + tket/tests/CMakeLists.txt | 4 +++- 3 files changed, 7 insertions(+), 3 
deletions(-) diff --git a/pytket/setup.py b/pytket/setup.py index d707068041..76962a0679 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -125,12 +125,13 @@ def run(self): "tket-Program", "tket-Characterisation", "tket-Converters", - "tket-Routing", + "tket-TokenSwapping", + "tket-Placement", + "tket-Mapping", "tket-MeasurementSetup", "tket-Transformations", "tket-ArchAwareSynth", "tket-Predicates" - "tket-TokenSwapping", ] for tket_lib in tket_libs: shutil.copy(os.path.join(directory, "lib", libfile(tket_lib)), extdir) diff --git a/tket/src/Predicates/include/Predicates/Predicates.hpp b/tket/src/Predicates/include/Predicates/Predicates.hpp index 53ac6accb4..e7ea785701 100644 --- a/tket/src/Predicates/include/Predicates/Predicates.hpp +++ b/tket/src/Predicates/include/Predicates/Predicates.hpp @@ -15,6 +15,7 @@ #pragma once #include +#include "Architecture/Architecture.hpp" #include "Transformations/Transform.hpp" namespace tket { diff --git a/tket/tests/CMakeLists.txt b/tket/tests/CMakeLists.txt index 527ff6bfc7..4ea994cbdf 100644 --- a/tket/tests/CMakeLists.txt +++ b/tket/tests/CMakeLists.txt @@ -59,7 +59,9 @@ target_link_libraries(test_tket PRIVATE tket-PauliGraph tket-Predicates tket-Program - tket-Routing + tket-Placement + tket-TokenSwapping + tket-Mapping tket-Simulation tket-Transformations tket-Utils From 66ce863e16ab6e7ba50c739eb7c696c9afbf7ffc Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 2 Feb 2022 13:51:49 +0000 Subject: [PATCH 026/146] reformat with black --- pytket/pytket/circuit/display/utils.py | 4 ++-- pytket/pytket/utils/results.py | 6 +++--- pytket/setup.py | 2 +- pytket/tests/qubitpaulioperator_test.py | 2 +- pytket/tests/utils_test.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pytket/pytket/circuit/display/utils.py b/pytket/pytket/circuit/display/utils.py index 7c6dfc8a77..4bf222354f 100644 --- a/pytket/pytket/circuit/display/utils.py +++ b/pytket/pytket/circuit/display/utils.py @@ -116,7 +116,7 @@ def format_raw_matrix( ) -> Dict[str, Any]: """Extract the matrix from a box if applicable and format it for display.""" n_qubits = round(math.log(len(unitary), 2)) - basis = [bin(n)[2:].zfill(n_qubits) for n in range(2 ** n_qubits)] + basis = [bin(n)[2:].zfill(n_qubits) for n in range(2**n_qubits)] return { "chart": zip( basis, @@ -133,7 +133,7 @@ def format_bool_matrix( ) -> Dict[str, Any]: """Format a boolean matrix for display.""" n_qubits = round(math.log(len(matrix), 2)) - basis = [bin(n)[2:].zfill(n_qubits) for n in range(2 ** n_qubits)] + basis = [bin(n)[2:].zfill(n_qubits) for n in range(2**n_qubits)] return { "chart": zip( basis, diff --git a/pytket/pytket/utils/results.py b/pytket/pytket/utils/results.py index cd9a5096ec..9c4157bc5f 100644 --- a/pytket/pytket/utils/results.py +++ b/pytket/pytket/utils/results.py @@ -151,7 +151,7 @@ def probs_from_state( :rtype: Dict[Tuple[int], float] """ width = get_n_qb_from_statevector(state) - probs = state.real ** 2 + state.imag ** 2 + probs = state.real**2 + state.imag**2 probs /= sum(probs) ignore = probs < min_p probs[ignore] = 0 @@ -171,7 +171,7 @@ def get_n_qb_from_statevector(state: np.ndarray) -> int: :rtype: int """ n_qb = int(np.log2(state.shape[0])) - if 2 ** n_qb != state.shape[0]: + if 2**n_qb != state.shape[0]: raise ValueError("Size is not a power of 2") return n_qb @@ -189,7 +189,7 @@ def _assert_compatible_state_permutation( :raises ValueError: [description] """ n_qb = len(permutation) - if 2 ** n_qb != state.shape[0]: + if 2**n_qb != state.shape[0]: raise 
ValueError("Invalid permutation: length does not match number of qubits") diff --git a/pytket/setup.py b/pytket/setup.py index 76962a0679..9a04452272 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -131,7 +131,7 @@ def run(self): "tket-MeasurementSetup", "tket-Transformations", "tket-ArchAwareSynth", - "tket-Predicates" + "tket-Predicates", ] for tket_lib in tket_libs: shutil.copy(os.path.join(directory, "lib", libfile(tket_lib)), extdir) diff --git a/pytket/tests/qubitpaulioperator_test.py b/pytket/tests/qubitpaulioperator_test.py index 5b7ee5313d..814dddaa6e 100644 --- a/pytket/tests/qubitpaulioperator_test.py +++ b/pytket/tests/qubitpaulioperator_test.py @@ -64,7 +64,7 @@ def test_QubitPauliOperator_scalarmult() -> None: assert qpo2 == qpo3 assert qpo2[QubitPauliString(Qubit("q"), Pauli.X)] == x * y qpo2 *= x - assert qpo2[QubitPauliString(Qubit("q"), Pauli.X)] == x ** 2 * y + assert qpo2[QubitPauliString(Qubit("q"), Pauli.X)] == x**2 * y def test_QubitPauliOperator_opmult() -> None: diff --git a/pytket/tests/utils_test.py b/pytket/tests/utils_test.py index a335f6c567..59c161bd01 100644 --- a/pytket/tests/utils_test.py +++ b/pytket/tests/utils_test.py @@ -160,7 +160,7 @@ def test_permute_state_err3() -> None: def test_permute_basis_indexing() -> None: dimensions = 3 bases = 1 << dimensions - matrix = np.arange(bases ** 2).reshape((bases, bases)) + matrix = np.arange(bases**2).reshape((bases, bases)) new_matrix = permute_basis_indexing(matrix, (1, 2, 0)) assert np.array_equal(new_matrix, matrix[[0, 4, 1, 5, 2, 6, 3, 7], :]) From 3aab473bded57237a6b99642339c167cb005dc46 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 2 Feb 2022 13:53:15 +0000 Subject: [PATCH 027/146] Revert "reformat with black" This reverts commit 66ce863e16ab6e7ba50c739eb7c696c9afbf7ffc. 
--- pytket/pytket/circuit/display/utils.py | 4 ++-- pytket/pytket/utils/results.py | 6 +++--- pytket/setup.py | 2 +- pytket/tests/qubitpaulioperator_test.py | 2 +- pytket/tests/utils_test.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pytket/pytket/circuit/display/utils.py b/pytket/pytket/circuit/display/utils.py index 4bf222354f..7c6dfc8a77 100644 --- a/pytket/pytket/circuit/display/utils.py +++ b/pytket/pytket/circuit/display/utils.py @@ -116,7 +116,7 @@ def format_raw_matrix( ) -> Dict[str, Any]: """Extract the matrix from a box if applicable and format it for display.""" n_qubits = round(math.log(len(unitary), 2)) - basis = [bin(n)[2:].zfill(n_qubits) for n in range(2**n_qubits)] + basis = [bin(n)[2:].zfill(n_qubits) for n in range(2 ** n_qubits)] return { "chart": zip( basis, @@ -133,7 +133,7 @@ def format_bool_matrix( ) -> Dict[str, Any]: """Format a boolean matrix for display.""" n_qubits = round(math.log(len(matrix), 2)) - basis = [bin(n)[2:].zfill(n_qubits) for n in range(2**n_qubits)] + basis = [bin(n)[2:].zfill(n_qubits) for n in range(2 ** n_qubits)] return { "chart": zip( basis, diff --git a/pytket/pytket/utils/results.py b/pytket/pytket/utils/results.py index 9c4157bc5f..cd9a5096ec 100644 --- a/pytket/pytket/utils/results.py +++ b/pytket/pytket/utils/results.py @@ -151,7 +151,7 @@ def probs_from_state( :rtype: Dict[Tuple[int], float] """ width = get_n_qb_from_statevector(state) - probs = state.real**2 + state.imag**2 + probs = state.real ** 2 + state.imag ** 2 probs /= sum(probs) ignore = probs < min_p probs[ignore] = 0 @@ -171,7 +171,7 @@ def get_n_qb_from_statevector(state: np.ndarray) -> int: :rtype: int """ n_qb = int(np.log2(state.shape[0])) - if 2**n_qb != state.shape[0]: + if 2 ** n_qb != state.shape[0]: raise ValueError("Size is not a power of 2") return n_qb @@ -189,7 +189,7 @@ def _assert_compatible_state_permutation( :raises ValueError: [description] """ n_qb = len(permutation) - if 2**n_qb != state.shape[0]: + if 2 ** n_qb != state.shape[0]: raise ValueError("Invalid permutation: length does not match number of qubits") diff --git a/pytket/setup.py b/pytket/setup.py index 9a04452272..76962a0679 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -131,7 +131,7 @@ def run(self): "tket-MeasurementSetup", "tket-Transformations", "tket-ArchAwareSynth", - "tket-Predicates", + "tket-Predicates" ] for tket_lib in tket_libs: shutil.copy(os.path.join(directory, "lib", libfile(tket_lib)), extdir) diff --git a/pytket/tests/qubitpaulioperator_test.py b/pytket/tests/qubitpaulioperator_test.py index 814dddaa6e..5b7ee5313d 100644 --- a/pytket/tests/qubitpaulioperator_test.py +++ b/pytket/tests/qubitpaulioperator_test.py @@ -64,7 +64,7 @@ def test_QubitPauliOperator_scalarmult() -> None: assert qpo2 == qpo3 assert qpo2[QubitPauliString(Qubit("q"), Pauli.X)] == x * y qpo2 *= x - assert qpo2[QubitPauliString(Qubit("q"), Pauli.X)] == x**2 * y + assert qpo2[QubitPauliString(Qubit("q"), Pauli.X)] == x ** 2 * y def test_QubitPauliOperator_opmult() -> None: diff --git a/pytket/tests/utils_test.py b/pytket/tests/utils_test.py index 59c161bd01..a335f6c567 100644 --- a/pytket/tests/utils_test.py +++ b/pytket/tests/utils_test.py @@ -160,7 +160,7 @@ def test_permute_state_err3() -> None: def test_permute_basis_indexing() -> None: dimensions = 3 bases = 1 << dimensions - matrix = np.arange(bases**2).reshape((bases, bases)) + matrix = np.arange(bases ** 2).reshape((bases, bases)) new_matrix = permute_basis_indexing(matrix, (1, 2, 0)) assert np.array_equal(new_matrix, 
matrix[[0, 4, 1, 5, 2, 6, 3, 7], :]) From a9f7478e8c9ef16e9f012654e13786efa3f32b81 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 2 Feb 2022 13:53:23 +0000 Subject: [PATCH 028/146] reformat setup.py --- pytket/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/setup.py b/pytket/setup.py index 76962a0679..9a04452272 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -131,7 +131,7 @@ def run(self): "tket-MeasurementSetup", "tket-Transformations", "tket-ArchAwareSynth", - "tket-Predicates" + "tket-Predicates", ] for tket_lib in tket_libs: shutil.copy(os.path.join(directory, "lib", libfile(tket_lib)), extdir) From 0516423c74cf6cbc5b1af868a5967785686a234c Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 2 Feb 2022 14:02:12 +0000 Subject: [PATCH 029/146] update proptests cmakelists --- tket/proptests/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tket/proptests/CMakeLists.txt b/tket/proptests/CMakeLists.txt index 529f3c2366..579c3995a7 100644 --- a/tket/proptests/CMakeLists.txt +++ b/tket/proptests/CMakeLists.txt @@ -49,7 +49,8 @@ target_link_libraries(proptest PRIVATE tket-Ops tket-OpType tket-Predicates - tket-Routing + tket-TokenSwapping + tket-Mapping tket-Simulation tket-Transformations tket-Utils) From 7e37426cb77ad4c9989af2a28ea82e5a047b4d18 Mon Sep 17 00:00:00 2001 From: yao-cqc <75305462+yao-cqc@users.noreply.github.com> Date: Thu, 3 Feb 2022 11:46:46 +0000 Subject: [PATCH 030/146] Feature/add serialisation for multi qubit reorder (#184) * Add JSON serialisation for MultiGateReorderRoutingMethod and getters * Add tests for JSON serialisation for MultiGateReorderRoutingMethod --- tket/src/Mapping/MultiGateReorder.cpp | 22 ++++++++++++++++ tket/src/Mapping/RoutingMethodJson.cpp | 5 ++++ .../include/Mapping/MultiGateReorder.hpp | 18 +++++++++++-- .../include/Mapping/RoutingMethodJson.hpp | 1 + tket/tests/test_MultiGateReorder.cpp | 26 +++++++++++++++++++ tket/tests/test_json.cpp | 18 +++++++++++++ 6 files changed, 88 insertions(+), 2 deletions(-) diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index a7216a2b3a..1b0a2b036b 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -269,4 +269,26 @@ unit_map_t MultiGateReorderRoutingMethod::routing_method( return {}; } +unsigned MultiGateReorderRoutingMethod::get_max_depth() const { + return this->max_depth_; +} + +unsigned MultiGateReorderRoutingMethod::get_max_size() const { + return this->max_size_; +} + +nlohmann::json MultiGateReorderRoutingMethod::serialize() const { + nlohmann::json j; + j["depth"] = this->max_depth_; + j["size"] = this->max_size_; + j["name"] = "MultiGateReorderRoutingMethod"; + return j; +} + +MultiGateReorderRoutingMethod MultiGateReorderRoutingMethod::deserialize( + const nlohmann::json &j) { + return MultiGateReorderRoutingMethod( + j.at("depth").get(), j.at("size").get()); +} + } // namespace tket diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 55245d45f5..577c8a0380 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -8,6 +8,8 @@ void from_json(const nlohmann::json& j, RoutingMethod& rm) { std::string name = j.at("name").get(); if (name == "LexiRouteRoutingMethod") { rm = LexiRouteRoutingMethod::deserialize(j); + } else if (name == "MultiGateReorderRoutingMethod") { + rm = MultiGateReorderRoutingMethod::deserialize(j); } else { throw JsonError( 
"Deserialization not yet implemented for generic RoutingMethod " @@ -27,6 +29,9 @@ void from_json(const nlohmann::json& j, std::vector& rmp) { if (name == "LexiRouteRoutingMethod") { rmp.push_back(std::make_shared( LexiRouteRoutingMethod::deserialize(c))); + } else if (name == "MultiGateReorderRoutingMethod") { + rmp.push_back(std::make_shared( + MultiGateReorderRoutingMethod::deserialize(c))); } else { rmp.push_back(std::make_shared(c.get())); } diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp index 631d2b012b..9a4e4604be 100644 --- a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -47,7 +47,7 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { */ bool check_method( const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const; + const ArchitecturePtr& /*architecture*/) const override; /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for @@ -58,7 +58,21 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { */ unit_map_t routing_method( std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const; + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static MultiGateReorderRoutingMethod deserialize(const nlohmann::json& j); + + /** + * @return Maximum number of layers of gates checked for commutation. + */ + unsigned get_max_depth() const; + + /** + * @return Maximum number of gates checked for commutation. + */ + unsigned get_max_size() const; private: unsigned max_depth_; diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp index 1c6cb7d359..38a4fd3cde 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -2,6 +2,7 @@ #define _TKET_RoutingMethodJson_H_ #include "Mapping/LexiRoute.hpp" +#include "Mapping/MultiGateReorder.hpp" #include "Mapping/RoutingMethod.hpp" #include "Utils/Json.hpp" diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 75621ffa3f..12117c2955 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -357,4 +357,30 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } } + +SCENARIO("Test JSON serialisation") { + GIVEN("MultiGateReorderRoutingMethod") { + nlohmann::json j_rm; + j_rm["name"] = "MultiGateReorderRoutingMethod"; + j_rm["depth"] = 3; + j_rm["size"] = 4; + MultiGateReorderRoutingMethod rm_loaded = + MultiGateReorderRoutingMethod::deserialize(j_rm); + nlohmann::json j_rm_serialised = rm_loaded.serialize(); + REQUIRE(j_rm == j_rm_serialised); + } + + GIVEN("RoutingMethod vector") { + nlohmann::json j_rms = { + {{"name", "MultiGateReorderRoutingMethod"}, {"depth", 3}, {"size", 4}}, + { + {"name", "LexiRouteRoutingMethod"}, + {"depth", 3}, + }}; + std::vector rms = + j_rms.get>(); + nlohmann::json j_rms_serialised = rms; + REQUIRE(j_rms == j_rms_serialised); + } +} } // namespace tket \ No newline at end of file diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index 868306f1cc..37295a1f49 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -606,6 +606,24 @@ SCENARIO("Test compiler pass serializations") { nlohmann::json j_loaded = 
loaded; REQUIRE(j_pp == j_loaded); } + GIVEN("Routing with MultiGateReorderRoutingMethod") { + RoutingMethodPtr mrmp = + std::make_shared(60, 80); + std::vector mrcon = {mrmp, rmp}; + Circuit circ = CircuitsForTesting::get().uccsd; + CompilationUnit cu{circ}; + PassPtr placement = gen_placement_pass(place); + placement->apply(cu); + CompilationUnit copy = cu; + PassPtr pp = gen_routing_pass(arc, mrcon); + nlohmann::json j_pp = pp; + PassPtr loaded = j_pp.get(); + pp->apply(cu); + loaded->apply(copy); + REQUIRE(cu.get_circ_ref() == copy.get_circ_ref()); + nlohmann::json j_loaded = loaded; + REQUIRE(j_pp == j_loaded); + } #define COMPPASSDESERIALIZE(passname, pass) \ GIVEN(#passname) { \ Circuit circ = CircuitsForTesting::get().uccsd; \ From 25bf193c7bc93dd5f37a4c81f40b759efaf979f9 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Thu, 3 Feb 2022 16:14:13 +0000 Subject: [PATCH 031/146] Update test coverage for RoutingMethod serialization --- tket/src/Mapping/RoutingMethodJson.cpp | 23 +++++++--------- .../src/Mapping/include/Mapping/LexiRoute.hpp | 2 +- .../Mapping/include/Mapping/RoutingMethod.hpp | 6 +++-- .../include/Mapping/RoutingMethodJson.hpp | 6 ++--- tket/tests/test_json.cpp | 26 +++++++++++++++++++ 5 files changed, 43 insertions(+), 20 deletions(-) diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 55245d45f5..f0b81599a4 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -4,31 +4,26 @@ namespace tket { void to_json(nlohmann::json& j, const RoutingMethod& rm) { j = rm.serialize(); } -void from_json(const nlohmann::json& j, RoutingMethod& rm) { - std::string name = j.at("name").get(); - if (name == "LexiRouteRoutingMethod") { - rm = LexiRouteRoutingMethod::deserialize(j); - } else { - throw JsonError( - "Deserialization not yet implemented for generic RoutingMethod " - "objects."); - } +void from_json(const nlohmann::json& /*j*/, RoutingMethod& rm) { + rm = RoutingMethod(); } -void to_json(nlohmann::json& j, const std::vector& rmp) { - for (const auto& r : rmp) { +void to_json(nlohmann::json& j, const std::vector& rmp_v) { + for (const auto& r : rmp_v) { j.push_back(*r); } } -void from_json(const nlohmann::json& j, std::vector& rmp) { +void from_json(const nlohmann::json& j, std::vector& rmp_v) { for (const auto& c : j) { std::string name = c.at("name").get(); if (name == "LexiRouteRoutingMethod") { - rmp.push_back(std::make_shared( + rmp_v.push_back(std::make_shared( LexiRouteRoutingMethod::deserialize(c))); + } else if (name == "RoutingMethod") { + rmp_v.push_back(std::make_shared()); } else { - rmp.push_back(std::make_shared(c.get())); + std::logic_error("Serialization for given RoutingMethod not supported."); } } } diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp index ce37090a5e..49aef0a62f 100644 --- a/tket/src/Mapping/include/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -187,7 +187,7 @@ class LexiRouteRoutingMethod : public RoutingMethod { unsigned max_depth_; }; -JSON_DECL(LexiRouteRoutingMethod) +JSON_DECL(LexiRouteRoutingMethod); } // namespace tket diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index ceb76e0b65..b2a1136072 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -49,12 +49,14 @@ class RoutingMethod { } virtual nlohmann::json serialize() 
const { - throw JsonError( - "JSON serialization not implemented for given RoutingMethod."); + nlohmann::json j; + j["name"] = "RoutingMethod"; + return j; } }; typedef std::shared_ptr RoutingMethodPtr; + } // namespace tket #endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp index 1c6cb7d359..9a42326c98 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -9,13 +9,13 @@ namespace tket { void to_json(nlohmann::json& j, const RoutingMethod& rm); -void from_json(const nlohmann::json& j, RoutingMethod& rm); +void from_json(const nlohmann::json& /*j*/, RoutingMethod& rm); JSON_DECL(RoutingMethod); -void to_json(nlohmann::json& j, const std::vector& rmp); +void to_json(nlohmann::json& j, const std::vector& rmp_v); -void from_json(const nlohmann::json& j, std::vector& rmp); +void from_json(const nlohmann::json& j, std::vector& rmp_v); JSON_DECL(std::vector); diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index 868306f1cc..c862693fbd 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -24,6 +24,8 @@ #include "CircuitsForTesting.hpp" #include "Converters/PhasePoly.hpp" #include "Gate/SymTable.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" #include "OpType/OpType.hpp" #include "Ops/OpPtr.hpp" #include "Predicates/PassGenerators.hpp" @@ -420,6 +422,30 @@ SCENARIO("Test device serializations") { } } +SCENARIO("Test RoutingMethod serializations") { + RoutingMethod rm; + nlohmann::json rm_j = rm; + RoutingMethod loaded_rm_j = rm_j.get(); + + Circuit c(2, 2); + CHECK(!loaded_rm_j.check_method( + std::make_shared(c), + std::make_shared(2, 2))); + + std::vector rmp = { + std::make_shared(rm), + std::make_shared(5)}; + nlohmann::json rmp_j = rmp; + std::vector loaded_rmp_j = + rmp_j.get>(); + CHECK(!loaded_rmp_j[0]->check_method( + std::make_shared(c), + std::make_shared(2, 2))); + CHECK(loaded_rmp_j[1]->check_method( + std::make_shared(c), + std::make_shared(2, 2))); +} + SCENARIO("Test predicate serializations") { #define BASICPREDJSONTEST(classname) \ GIVEN(#classname) { \ From c4f427ec4edfeccc7e04c407d2412d41a53ba16f Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Thu, 3 Feb 2022 16:20:46 +0000 Subject: [PATCH 032/146] make mapping_frontier from mapping_frontier --- tket/src/Mapping/RoutingMethodJson.cpp | 5 +++-- tket/tests/test_MappingFrontier.cpp | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 4daf772134..bb8c7c543e 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -23,10 +23,11 @@ void from_json(const nlohmann::json& j, std::vector& rmp_v) { } else if (name == "RoutingMethod") { rmp_v.push_back(std::make_shared()); } else if (name == "MultiGateReorderRoutingMethod") { - rmp.push_back(std::make_shared( + rmp_v.push_back(std::make_shared( MultiGateReorderRoutingMethod::deserialize(c))); } else { - std::logic_error("Serialization for given RoutingMethod not supported."); + std::logic_error( + "Deserialization for given RoutingMethod not supported."); } } } diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp index fb65ce89e4..bb33f0f095 100644 --- a/tket/tests/test_MappingFrontier.cpp +++ b/tket/tests/test_MappingFrontier.cpp @@ -37,7 +37,8 @@ SCENARIO("Test 
MappingFrontier initialisation, advance_frontier_boundary.") { {qubits[3], nodes[3]}}; circ.rename_units(rename_map); - MappingFrontier mf(circ); + MappingFrontier m(circ); + MappingFrontier mf(m); mf.advance_frontier_boundary(shared_arc); VertPort vp0 = mf.quantum_boundary->get().find(nodes[0])->second; From 991b85dd5ec303db69a9b3b4279779d68b04d222 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Thu, 3 Feb 2022 17:54:14 +0000 Subject: [PATCH 033/146] routing -> mapping --- pytket/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/CMakeLists.txt b/pytket/CMakeLists.txt index 42a8f5c06b..b7326df9cb 100644 --- a/pytket/CMakeLists.txt +++ b/pytket/CMakeLists.txt @@ -79,7 +79,7 @@ target_link_libraries(mapping PRIVATE tket-OpType tket-TokenSwapping tket-Utils) -target_link_libraries(routing PRIVATE ${TKET_EXTRA_LIBS}) +target_link_libraries(mapping PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(mapping PRIVATE bcrypt) From df99fbf186f786312e4100437c17e50c4728006f Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Thu, 3 Feb 2022 19:40:15 +0000 Subject: [PATCH 034/146] use TKET_ASSERT_WITH_THROW instead of TKET_ASSERT --- tket/src/Gate/GateUnitaryMatrix.cpp | 2 +- .../Gate/GateUnitaryMatrixVariableQubits.cpp | 12 ++-- tket/src/Gate/GateUnitarySparseMatrix.cpp | 4 +- tket/src/Graphs/AdjacencyData.cpp | 8 +-- tket/src/Simulation/BitOperations.cpp | 2 +- tket/src/Simulation/GateNode.cpp | 13 ++-- .../PauliExpBoxUnitaryCalculator.cpp | 2 +- .../src/TokenSwapping/ArchitectureMapping.cpp | 10 +-- tket/src/TokenSwapping/CyclesPartialTsa.cpp | 12 ++-- .../TokenSwapping/CyclicShiftCostEstimate.cpp | 6 +- .../DistancesFromArchitecture.cpp | 19 +++-- tket/src/TokenSwapping/HybridTsa00.cpp | 6 +- .../NeighboursFromArchitecture.cpp | 26 ++++--- .../src/TokenSwapping/PathFinderInterface.cpp | 2 + tket/src/TokenSwapping/SwapListOptimiser.cpp | 18 ++--- .../TokenSwapping/TSAUtils/DebugFunctions.cpp | 2 + .../TSAUtils/GeneralFunctions.cpp | 11 +-- .../TSAUtils/VertexMappingFunctions.cpp | 19 +++-- .../TableLookup/CanonicalRelabelling.cpp | 16 ++--- .../TableLookup/ExactMappingLookup.cpp | 18 ++--- .../TableLookup/FilteredSwapSequences.cpp | 14 ++-- .../TableLookup/PartialMappingLookup.cpp | 2 +- .../TableLookup/SwapConversion.cpp | 8 +-- .../TableLookup/SwapListSegmentOptimiser.cpp | 12 ++-- .../TableLookup/SwapListTableOptimiser.cpp | 20 +++--- .../TableLookup/VertexMapResizing.cpp | 12 ++-- .../VectorListHybridSkeleton.cpp | 72 +++++++++---------- .../TokenSwapping/VectorListHybrid.hpp | 6 +- .../TokenSwapping/main_entry_functions.cpp | 5 +- 29 files changed, 186 insertions(+), 173 deletions(-) diff --git a/tket/src/Gate/GateUnitaryMatrix.cpp b/tket/src/Gate/GateUnitaryMatrix.cpp index 06b6808d2d..18f877ebb8 100644 --- a/tket/src/Gate/GateUnitaryMatrix.cpp +++ b/tket/src/Gate/GateUnitaryMatrix.cpp @@ -157,7 +157,7 @@ static Eigen::MatrixXcd get_unitary_for_ordinary_fixed_size_case( const Eigen::MatrixXcd matr = get_unitary_or_throw(op_type, number_of_qubits, parameters); - TKET_ASSERT(matr.cols() == matr.rows()); + TKET_ASSERT_WITH_THROW(matr.cols() == matr.rows()); const auto expected_number_of_qubits = get_number_of_qubits(matr.cols()); if (expected_number_of_qubits == number_of_qubits) { return matr; diff --git a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp index a6aaf317dd..e6c537bda6 100644 --- a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp +++ 
b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp @@ -50,27 +50,27 @@ unsigned GateUnitaryMatrixVariableQubits::get_number_of_parameters() const { Eigen::MatrixXcd GateUnitaryMatrixVariableQubits::get_dense_unitary( unsigned number_of_qubits, const std::vector& parameters) const { // This class is internal only, so an assert is OK. - TKET_ASSERT(known_type); - TKET_ASSERT(parameters.size() == number_of_parameters); + TKET_ASSERT_WITH_THROW(known_type); + TKET_ASSERT_WITH_THROW(parameters.size() == number_of_parameters); switch (parameters.size()) { case 0: - TKET_ASSERT(op_type == OpType::CnX); + TKET_ASSERT_WITH_THROW(op_type == OpType::CnX); return GateUnitaryMatrixImplementations::CnX(number_of_qubits); case 1: if (op_type == OpType::CnRy) { return GateUnitaryMatrixImplementations::CnRy( number_of_qubits, parameters[0]); } else { - TKET_ASSERT(op_type == OpType::PhaseGadget); + TKET_ASSERT_WITH_THROW(op_type == OpType::PhaseGadget); return GateUnitaryMatrixImplementations::PhaseGadget( number_of_qubits, parameters[0]); } case 2: - TKET_ASSERT(op_type == OpType::NPhasedX); + TKET_ASSERT_WITH_THROW(op_type == OpType::NPhasedX); return GateUnitaryMatrixImplementations::NPhasedX( number_of_qubits, parameters[0], parameters[1]); default: - TKET_ASSERT(false); + TKET_ASSERT_WITH_THROW(false); } } diff --git a/tket/src/Gate/GateUnitarySparseMatrix.cpp b/tket/src/Gate/GateUnitarySparseMatrix.cpp index 00a4c1a735..1780c0c996 100644 --- a/tket/src/Gate/GateUnitarySparseMatrix.cpp +++ b/tket/src/Gate/GateUnitarySparseMatrix.cpp @@ -107,7 +107,7 @@ const FixedTripletsWithNoParameters& FixedTripletsWithNoParameters::get( GateUnitaryMatrixUtils::check_and_throw_upon_wrong_number_of_parameters( gate.get_type(), gate.n_qubits(), GateUnitaryMatrixUtils::get_checked_parameters(gate), 0); - TKET_ASSERT(gate.n_qubits() == 3); + TKET_ASSERT_WITH_THROW(gate.n_qubits() == 3); return data; } } // namespace @@ -153,12 +153,14 @@ std::vector GateUnitarySparseMatrix::get_unitary_triplets( return convert_1qb_type_to_controlled_type_and_get_triplets( gate, primitive_type, abs_epsilon); } catch (const GateUnitaryMatrixError& e) { + // GCOVR_EXCL_START std::stringstream ss; OpDesc desc(primitive_type); ss << "Converting " << gate.get_name() << " to sparse unitary, via adding controls to gate type " << desc.name() << ": " << e.what(); throw GateUnitaryMatrixError(ss.str(), e.cause); + // GCOVR_EXCL_STOP } } return get_triplets_for_noncontrolled_gate(gate); diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index 7589513c6e..18ea0872be 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -65,7 +65,7 @@ string AdjacencyData::to_string() const { const set& AdjacencyData::get_neighbours( std::size_t vertex) const { - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( vertex < m_cleaned_data.size() || AssertMessage() << "AdjacencyData: get_neighbours called with invalid vertex " @@ -101,7 +101,7 @@ bool AdjacencyData::add_edge(std::size_t i, std::size_t j) { } bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( (i < m_cleaned_data.size() && j < m_cleaned_data.size()) || AssertMessage() << "edge_exists called with vertices " << i << ", " << j << ", but there are only " << m_cleaned_data.size() @@ -143,11 +143,11 @@ AdjacencyData::AdjacencyData( for (std::size_t i = 0; i < m_cleaned_data.size(); ++i) { for (std::size_t j : raw_data[i]) { - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( i != j || allow_loops || 
AssertMessage() << "vertex " << i << " out of " << m_cleaned_data.size() << " has a loop."); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( j < m_cleaned_data.size() || AssertMessage() << "vertex " << i << " has illegal neighbour vertex " << j << ", the size is " << m_cleaned_data.size()); diff --git a/tket/src/Simulation/BitOperations.cpp b/tket/src/Simulation/BitOperations.cpp index 4ccb7fddf9..418a0b8a79 100644 --- a/tket/src/Simulation/BitOperations.cpp +++ b/tket/src/Simulation/BitOperations.cpp @@ -46,7 +46,7 @@ ExpansionData get_expansion_data( auto test_bit = next_bit; for (unsigned left_shift_arg = 0;; ++left_shift_arg) { if ((test_bit & forbidden_bits) == 0) { - TKET_ASSERT(test_bit != 0); + TKET_ASSERT_WITH_THROW(test_bit != 0); // A free space has been found. push_back(result, next_bit, left_shift_arg); forbidden_bits |= test_bit; diff --git a/tket/src/Simulation/GateNode.cpp b/tket/src/Simulation/GateNode.cpp index fe40d96ef2..1af51fb425 100644 --- a/tket/src/Simulation/GateNode.cpp +++ b/tket/src/Simulation/GateNode.cpp @@ -168,9 +168,9 @@ struct LiftedBitsResult { void LiftedBitsResult::set( const std::vector& qubits, unsigned full_number_of_qubits) { - TKET_ASSERT(full_number_of_qubits >= qubits.size()); - TKET_ASSERT(full_number_of_qubits < 32); - TKET_ASSERT(!qubits.empty()); + TKET_ASSERT_WITH_THROW(full_number_of_qubits >= qubits.size()); + TKET_ASSERT_WITH_THROW(full_number_of_qubits < 32); + TKET_ASSERT_WITH_THROW(!qubits.empty()); translated_bits.assign(get_matrix_size(qubits.size()), 0); @@ -179,7 +179,7 @@ void LiftedBitsResult::set( SimUInt k_string_bit = 1; for (unsigned count = 0; count < qubits.size(); ++count) { - TKET_ASSERT(full_number_of_qubits >= qubits[count] + 1); + TKET_ASSERT_WITH_THROW(full_number_of_qubits >= qubits[count] + 1); // This will be a bit within the length n string. 
SimUInt long_string_bit = 1; @@ -225,9 +225,8 @@ static void set_lifted_triplets( const SimUInt free_bits_limit = get_matrix_size(full_number_of_qubits - qubits.size()); - if (free_bits_limit == 0) { - throw std::runtime_error("Too many bits"); - } + TKET_ASSERT_WITH_THROW(free_bits_limit != 0 || !"Too many bits"); + for (SimUInt free_bits = 0; free_bits < free_bits_limit; ++free_bits) { const SimUInt expanded_free_bits = get_expanded_bits(expansion_data, free_bits); diff --git a/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp b/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp index fe58c9523f..36e9651cc9 100644 --- a/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp +++ b/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp @@ -125,7 +125,7 @@ void PauliExpBoxUnitaryCalculator::clear() { void PauliExpBoxUnitaryCalculator::add_entries( unsigned sparse_matrix_index, Pauli pauli) { - TKET_ASSERT(sparse_matrix_index < sparse_matrix.size()); + TKET_ASSERT_WITH_THROW(sparse_matrix_index < sparse_matrix.size()); const auto& single_pauli = pauli_map.at(pauli); sparse_matrix.push_back( get_combined_entry(sparse_matrix[sparse_matrix_index], single_pauli[0])); diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 4fa444a17c..8dc811a9ea 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -34,7 +34,7 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) const auto& node = m_vertex_to_node_mapping[ii]; { const auto citer = m_node_to_vertex_mapping.find(node); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( citer == m_node_to_vertex_mapping.cend() || AssertMessage() << "Duplicate node " << node.repr() << " at vertices " << citer->second << ", " << ii); @@ -68,7 +68,7 @@ ArchitectureMapping::ArchitectureMapping( // Check that the nodes agree with the architecture object. 
const auto uids = arch.nodes(); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( uids.size() == m_vertex_to_node_mapping.size() || AssertMessage() << "passed in " << edges.size() << " edges, giving " << m_vertex_to_node_mapping.size() @@ -77,7 +77,7 @@ ArchitectureMapping::ArchitectureMapping( for (const UnitID& uid : uids) { const Node node(uid); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( m_node_to_vertex_mapping.count(node) != 0 || AssertMessage() << "passed in " << edges.size() << " edges, giving " @@ -93,7 +93,7 @@ size_t ArchitectureMapping::number_of_vertices() const { const Node& ArchitectureMapping::get_node(size_t vertex) const { const auto num_vertices = number_of_vertices(); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( vertex < num_vertices || AssertMessage() << "get_node: invalid vertex " << vertex << " (architecture only has " << num_vertices @@ -104,7 +104,7 @@ const Node& ArchitectureMapping::get_node(size_t vertex) const { size_t ArchitectureMapping::get_vertex(const Node& node) const { const auto citer = m_node_to_vertex_mapping.find(node); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( citer != m_node_to_vertex_mapping.cend() || AssertMessage() << "get_vertex: node " << node.repr() << " has no vertex number"); diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.cpp b/tket/src/TokenSwapping/CyclesPartialTsa.cpp index 914bc8dd63..ede47f3b44 100644 --- a/tket/src/TokenSwapping/CyclesPartialTsa.cpp +++ b/tket/src/TokenSwapping/CyclesPartialTsa.cpp @@ -23,6 +23,7 @@ namespace tsa_internal { CyclesPartialTsa::CyclesPartialTsa() { m_name = "Cycles"; } +// GCOVR_EXCL_START CyclesGrowthManager::Options& CyclesPartialTsa::growth_options() { return m_growth_manager.get_options(); } @@ -30,6 +31,7 @@ CyclesGrowthManager::Options& CyclesPartialTsa::growth_options() { CyclesCandidateManager::Options& CyclesPartialTsa::candidate_options() { return m_candidate_manager.get_options(); } +// GCOVR_EXCL_STOP void CyclesPartialTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, @@ -47,20 +49,20 @@ void CyclesPartialTsa::append_partial_solution( single_iteration_partial_solution( swaps, vertex_mapping, distances, neighbours); const auto swap_size_after = swaps.size(); - TKET_ASSERT(swap_size_after >= swap_size_before); + TKET_ASSERT_WITH_THROW(swap_size_after >= swap_size_before); if (swap_size_before == swap_size_after) { break; } } const size_t final_swap_size = swaps.size(); - TKET_ASSERT(initial_swap_size <= final_swap_size); + TKET_ASSERT_WITH_THROW(initial_swap_size <= final_swap_size); if (initial_swap_size == final_swap_size || !path_finder.edge_registration_has_effect()) { return; } // At least one swap was added. 
const auto current_back_id_opt = swaps.back_id(); - TKET_ASSERT(current_back_id_opt); + TKET_ASSERT_WITH_THROW(current_back_id_opt); auto current_id = current_back_id_opt.value(); for (size_t remaining_swaps = final_swap_size - initial_swap_size;;) { const auto& swap = swaps.at(current_id); @@ -70,7 +72,7 @@ void CyclesPartialTsa::append_partial_solution( break; } const auto prev_id_opt = swaps.previous(current_id); - TKET_ASSERT(prev_id_opt); + TKET_ASSERT_WITH_THROW(prev_id_opt); current_id = prev_id_opt.value(); } } @@ -98,7 +100,7 @@ void CyclesPartialTsa::single_iteration_partial_solution( return; } } - TKET_ASSERT(!"growth_manager termination"); + TKET_ASSERT_WITH_THROW(!"growth_manager termination"); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp index f5fe4a0050..6dcc9bdd1b 100644 --- a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp @@ -23,7 +23,7 @@ namespace tsa_internal { CyclicShiftCostEstimate::CyclicShiftCostEstimate( const std::vector& vertices, DistancesInterface& distances) { - TKET_ASSERT(vertices.size() >= 2); + TKET_ASSERT_WITH_THROW(vertices.size() >= 2); // We first work out the total distance v(0)->v(1)-> .. -> v(n) -> v(0). // If we snip out v(i)->v(i+1), the remaining path tells us how many swaps // we need. So, we must snip out the LARGEST distance(v(i), v(i+1)). @@ -37,7 +37,7 @@ CyclicShiftCostEstimate::CyclicShiftCostEstimate( size_t v_index_with_largest_distance = vertices.size() - 1; for (size_t ii = 0; ii + 1 < vertices.size(); ++ii) { const auto distance_i = distances(vertices[ii], vertices[ii + 1]); - TKET_ASSERT(distance_i > 0); + TKET_ASSERT_WITH_THROW(distance_i > 0); total_distance += distance_i; if (distance_i < largest_distance) { largest_distance = distance_i; @@ -58,7 +58,7 @@ CyclicShiftCostEstimate::CyclicShiftCostEstimate( // What we've currently stored is the sum of dist(x,y), // and clearly (sum)(-1) = -(Number of terms in the sum). estimated_concrete_swaps = 2 * total_distance; - TKET_ASSERT(estimated_concrete_swaps > vertices.size() - 1); + TKET_ASSERT_WITH_THROW(estimated_concrete_swaps > vertices.size() - 1); estimated_concrete_swaps -= vertices.size() - 1; } diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index ad8039edcb..5195f0369b 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -74,16 +74,15 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { // architectures, since get_distance now should throw if v1, v2 are in // different connected components. However, leave the check in, in case some // other bizarre error causes distance zero to be returned. - if (distance_entry == 0) { - std::stringstream ss; - ss << "DistancesFromArchitecture: architecture has " << arch.n_nodes() - << " vertices, " << arch.n_connections() - << " edges; returned diameter " << arch.get_diameter() << ", but d(" - << vertex1 << "," << vertex2 - << ")=0. " - "Is the graph connected?"; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT_WITH_THROW( + distance_entry > 0 || + AssertMessage() << "DistancesFromArchitecture: architecture has " + << arch.n_nodes() << " vertices, " + << arch.n_connections() << " edges; returned diameter " + << arch.get_diameter() << ", but d(" << vertex1 << "," + << vertex2 + << ")=0. 
" + "Is the graph connected?"); } return distance_entry; } diff --git a/tket/src/TokenSwapping/HybridTsa00.cpp b/tket/src/TokenSwapping/HybridTsa00.cpp index 0ab272412e..6ae5e9fb04 100644 --- a/tket/src/TokenSwapping/HybridTsa00.cpp +++ b/tket/src/TokenSwapping/HybridTsa00.cpp @@ -27,11 +27,13 @@ HybridTsa00::HybridTsa00() { m_trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); } +// GCOVR_EXCL_START CyclesPartialTsa& HybridTsa00::get_cycles_tsa_for_testing() { return m_cycles_tsa; } TrivialTSA& HybridTsa00::get_trivial_tsa_for_testing() { return m_trivial_tsa; } +// GCOVR_EXCL_STOP void HybridTsa00::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, @@ -47,11 +49,11 @@ void HybridTsa00::append_partial_solution( swaps, vertex_mapping, distances, neighbours, path_finder); if (swaps_before == swaps.size()) { - TKET_ASSERT(all_tokens_home(vertex_mapping)); + TKET_ASSERT_WITH_THROW(all_tokens_home(vertex_mapping)); return; } } - TKET_ASSERT(!"hybrid TSA termination"); + TKET_ASSERT_WITH_THROW(!"hybrid TSA termination"); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index b8ed27bb68..3fb8e4d8b7 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -28,12 +28,11 @@ NeighboursFromArchitecture::NeighboursFromArchitecture( const std::vector& NeighboursFromArchitecture::operator()( size_t vertex) { const auto num_vertices = m_arch_mapping.number_of_vertices(); - if (vertex >= num_vertices) { - std::stringstream ss; - ss << "get_neighbours: invalid vertex " << vertex << " (only have " - << num_vertices << " vertices)"; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT_WITH_THROW( + vertex < num_vertices || + AssertMessage() << "get_neighbours: invalid vertex " << vertex + << " (only have " << num_vertices << " vertices)"); + auto& neighbours = m_cached_neighbours[vertex]; if (!neighbours.empty()) { // Already cached. 
@@ -51,14 +50,13 @@ const std::vector& NeighboursFromArchitecture::operator()( for (const Node& node : neighbour_nodes) { const auto neighbour_vertex = m_arch_mapping.get_vertex(node); - if (neighbour_vertex == vertex) { - std::stringstream ss; - ss << "get_neighbours: vertex " << vertex << " for node " << node.repr() - << " has " << neighbour_nodes.size() - << " neighbours, and lists itself as a neighbour (loops not " - "allowed)"; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT_WITH_THROW( + neighbour_vertex != vertex || + AssertMessage() + << "get_neighbours: vertex " << vertex << " for node " + << node.repr() << " has " << neighbour_nodes.size() + << " neighbours, and lists itself as a neighbour (loops not " + "allowed)"); neighbours.push_back(neighbour_vertex); } std::sort(neighbours.begin(), neighbours.end()); diff --git a/tket/src/TokenSwapping/PathFinderInterface.cpp b/tket/src/TokenSwapping/PathFinderInterface.cpp index c9f95ad134..d8d03169e6 100644 --- a/tket/src/TokenSwapping/PathFinderInterface.cpp +++ b/tket/src/TokenSwapping/PathFinderInterface.cpp @@ -23,6 +23,7 @@ PathFinderInterface::PathFinderInterface() : m_name("Empty") {} PathFinderInterface::~PathFinderInterface() {} +// GCOVR_EXCL_START const std::vector& PathFinderInterface::operator()( size_t /*vertex1*/, size_t /*vertex2*/) { throw NotImplemented("PathFinderInterface: get path"); @@ -36,6 +37,7 @@ void PathFinderInterface::register_edge( size_t /*vertex1*/, size_t /*vertex2*/) {} bool PathFinderInterface::edge_registration_has_effect() const { return false; } +// GCOVR_EXCL_STOP } // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp index 42773437ce..9fc1417f31 100644 --- a/tket/src/TokenSwapping/SwapListOptimiser.cpp +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -77,7 +77,7 @@ SwapListOptimiser::get_id_of_previous_blocker(SwapList& list, SwapID id) { break; } } - TKET_ASSERT(terminated_correctly); + TKET_ASSERT_WITH_THROW(terminated_correctly); // It's hit a copy of itself list.erase(id); list.erase(current_id); @@ -85,7 +85,7 @@ SwapListOptimiser::get_id_of_previous_blocker(SwapList& list, SwapID id) { } bool SwapListOptimiser::move_swap_towards_front(SwapList& list, SwapID id) { - TKET_ASSERT(list.front_id()); + TKET_ASSERT_WITH_THROW(list.front_id()); if (id == list.front_id().value()) { return false; } @@ -166,7 +166,7 @@ void SwapListOptimiser::optimise_pass_with_zero_travel(SwapList& list) { } current_id = next_id_opt.value(); } - TKET_ASSERT(!"optimise_pass_with_zero_travel termination"); + TKET_ASSERT_WITH_THROW(!"optimise_pass_with_zero_travel termination"); } void SwapListOptimiser::optimise_pass_with_frontward_travel(SwapList& list) { @@ -186,7 +186,7 @@ void SwapListOptimiser::optimise_pass_with_frontward_travel(SwapList& list) { } current_id = next_id_opt.value(); } - TKET_ASSERT(!"optimise_pass_with_frontward_travel termination"); + TKET_ASSERT_WITH_THROW(!"optimise_pass_with_frontward_travel termination"); } void SwapListOptimiser::optimise_pass_with_token_tracking(SwapList& list) { @@ -261,14 +261,14 @@ void SwapListOptimiser:: } current_id = next_id_opt.value(); } - TKET_ASSERT(terminated_correctly); + TKET_ASSERT_WITH_THROW(terminated_correctly); const auto new_size = list.size(); if (old_size == new_size) { return; } - TKET_ASSERT(new_size < old_size); + TKET_ASSERT_WITH_THROW(new_size < old_size); } - TKET_ASSERT(!"optimise_pass_with_token_tracking termination"); + 
TKET_ASSERT_WITH_THROW(!"optimise_pass_with_token_tracking termination"); } void SwapListOptimiser::full_optimise(SwapList& list) { @@ -288,9 +288,9 @@ void SwapListOptimiser::full_optimise( if (old_size == list.size() || list.size() == 0) { return; } - TKET_ASSERT(list.size() < old_size); + TKET_ASSERT_WITH_THROW(list.size() < old_size); } - TKET_ASSERT(!"full_optimise termination"); + TKET_ASSERT_WITH_THROW(!"full_optimise termination"); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp index a16b9cca6b..ce4943e493 100644 --- a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp @@ -19,6 +19,7 @@ namespace tket { namespace tsa_internal { +// GCOVR_EXCL_START std::string str(const VertexMapping& vertex_mapping) { std::stringstream ss; ss << "VM:"; @@ -27,6 +28,7 @@ std::string str(const VertexMapping& vertex_mapping) { } return ss.str(); } +// GCOVR_EXCL_STOP std::string str(const SwapList& swaps) { return str(swaps.to_vector()); } diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp index b87bb1902c..4546b7ed40 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -17,14 +17,16 @@ #include #include +#include "Utils/Assert.hpp" + namespace tket { namespace tsa_internal { std::set get_random_set( RNG& rng, size_t sample_size, size_t population_size) { - if (sample_size > population_size) { - throw std::runtime_error("get_random_set: sample too large"); - } + TKET_ASSERT_WITH_THROW( + sample_size <= population_size || !"get_random_set: sample too large"); + std::set result; if (sample_size == 0 || population_size == 0) { return result; @@ -44,7 +46,8 @@ std::set get_random_set( return result; } } - throw std::runtime_error("get_random_set: dropped out of loop"); + TKET_ASSERT_WITH_THROW(!"get_random_set: dropped out of loop"); + return result; } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index 5d8b1c965c..5f1a83861c 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -36,15 +36,14 @@ void check_mapping( const VertexMapping& vertex_mapping, VertexMapping& work_mapping) { work_mapping.clear(); for (const auto& entry : vertex_mapping) { - if (work_mapping.count(entry.second) == 0) { - work_mapping[entry.second] = entry.first; - } else { - std::stringstream ss; - ss << "Vertices v_" << entry.first << " and v_" - << work_mapping[entry.second] << " both have the same target vertex v_" - << entry.second; - throw std::runtime_error(ss.str()); - } + TKET_ASSERT_WITH_THROW( + work_mapping.count(entry.second) == 0 || + AssertMessage() << "Vertices v_" << entry.first << " and v_" + << work_mapping[entry.second] + << " both have the same target vertex v_" + << entry.second); + + work_mapping[entry.second] = entry.first; } } @@ -80,7 +79,7 @@ size_t get_source_vertex( return entry.first; } } - TKET_ASSERT(!"get_source_vertex"); + TKET_ASSERT_WITH_THROW(!"get_source_vertex"); return target_vertex; } diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp index e26a60b976..f676a35a55 100644 --- 
a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -46,8 +46,8 @@ const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( return m_result; } // If not the identity, at least 2 vertices moved. - TKET_ASSERT(desired_mapping.size() >= 2); - TKET_ASSERT(desired_mapping.size() <= 6); + TKET_ASSERT_WITH_THROW(desired_mapping.size() >= 2); + TKET_ASSERT_WITH_THROW(desired_mapping.size() <= 6); m_desired_mapping = desired_mapping; unsigned next_cyc_index = 0; @@ -63,14 +63,14 @@ const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( infinite_loop_guard != 0; --infinite_loop_guard) { const auto curr_v = this_cycle.back(); const auto target_v = m_desired_mapping.at(curr_v); - TKET_ASSERT(m_desired_mapping.erase(curr_v) == 1); + TKET_ASSERT_WITH_THROW(m_desired_mapping.erase(curr_v) == 1); if (target_v == this_cycle[0]) { terminated_correctly = true; break; } this_cycle.push_back(target_v); } - TKET_ASSERT(terminated_correctly); + TKET_ASSERT_WITH_THROW(terminated_correctly); } // Sort by cycle length, LONGEST cycles first. // But, also want a "stable-like" sort: @@ -98,18 +98,18 @@ const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( m_result.new_to_old_vertices.clear(); for (auto ii : m_sorted_cycles_indices) { const auto& cyc = m_cycles[ii]; - TKET_ASSERT(!cyc.empty()); - TKET_ASSERT(cyc.size() <= 6); + TKET_ASSERT_WITH_THROW(!cyc.empty()); + TKET_ASSERT_WITH_THROW(cyc.size() <= 6); for (size_t old_v : cyc) { m_result.new_to_old_vertices.push_back(old_v); } } - TKET_ASSERT(m_result.new_to_old_vertices.size() <= 6); + TKET_ASSERT_WITH_THROW(m_result.new_to_old_vertices.size() <= 6); m_result.old_to_new_vertices.clear(); for (unsigned ii = 0; ii < m_result.new_to_old_vertices.size(); ++ii) { m_result.old_to_new_vertices[m_result.new_to_old_vertices[ii]] = ii; } - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( m_result.new_to_old_vertices.size() == m_result.old_to_new_vertices.size()); diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp index d6c5f96c07..d31a74c9e8 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -62,11 +62,11 @@ ExactMappingLookup::improve_upon_existing_result( } return m_result; } - TKET_ASSERT(relabelling.permutation_hash != 0); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW(relabelling.permutation_hash != 0); + TKET_ASSERT_WITH_THROW( relabelling.new_to_old_vertices.size() == relabelling.old_to_new_vertices.size()); - TKET_ASSERT(relabelling.new_to_old_vertices.size() >= 2); + TKET_ASSERT_WITH_THROW(relabelling.new_to_old_vertices.size() >= 2); fill_result_from_table(relabelling, edges, max_number_of_swaps); return m_result; @@ -102,8 +102,8 @@ void ExactMappingLookup::fill_result_from_table( } const auto new_v1 = new_v1_opt.value(); const auto new_v2 = new_v2_opt.value(); - TKET_ASSERT(new_v1 <= 5); - TKET_ASSERT(new_v2 <= 5); + TKET_ASSERT_WITH_THROW(new_v1 <= 5); + TKET_ASSERT_WITH_THROW(new_v2 <= 5); new_edges_bitset |= SwapConversion::get_edges_bitset( SwapConversion::get_hash_from_swap(get_swap(new_v1, new_v2))); } @@ -112,13 +112,13 @@ void ExactMappingLookup::fill_result_from_table( relabelling_result.permutation_hash, new_edges_bitset, max_number_of_swaps); - TKET_ASSERT(table_result.number_of_swaps > 0); + TKET_ASSERT_WITH_THROW(table_result.number_of_swaps > 0); if (table_result.number_of_swaps 
> max_number_of_swaps) { // No result in the table. return; } - TKET_ASSERT(table_result.edges_bitset != 0); - TKET_ASSERT(table_result.swaps_code > 0); + TKET_ASSERT_WITH_THROW(table_result.edges_bitset != 0); + TKET_ASSERT_WITH_THROW(table_result.swaps_code > 0); m_result.success = true; m_result.swaps.clear(); @@ -131,7 +131,7 @@ void ExactMappingLookup::fill_result_from_table( relabelling_result.new_to_old_vertices.at(new_swap.first), relabelling_result.new_to_old_vertices.at(new_swap.second))); } - TKET_ASSERT(m_result.swaps.size() <= 16); + TKET_ASSERT_WITH_THROW(m_result.swaps.size() <= 16); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp index 39fb0ccbdc..f9ae1b0655 100644 --- a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -85,10 +85,10 @@ through all entries. void FilteredSwapSequences::initialise( std::vector codes) { // Can only initialise once. - TKET_ASSERT(m_internal_data.empty()); + TKET_ASSERT_WITH_THROW(m_internal_data.empty()); std::sort(codes.begin(), codes.end()); - TKET_ASSERT(!codes.empty()); - TKET_ASSERT(codes[0] != 0); + TKET_ASSERT_WITH_THROW(!codes.empty()); + TKET_ASSERT_WITH_THROW(codes[0] != 0); TrimmedSingleSequenceData datum; for (size_t ii = 0; ii < codes.size(); ++ii) { @@ -104,7 +104,7 @@ void FilteredSwapSequences::initialise( void FilteredSwapSequences::push_back(TrimmedSingleSequenceData datum) { auto bitset_copy = datum.edges_bitset; - TKET_ASSERT(bitset_copy != 0); + TKET_ASSERT_WITH_THROW(bitset_copy != 0); SwapConversion::EdgesBitset bit_to_use = 0; // We want to add to the smallest list, to keep the data balanced. @@ -135,7 +135,7 @@ void FilteredSwapSequences::push_back(TrimmedSingleSequenceData datum) { } } } - TKET_ASSERT(bit_to_use != 0); + TKET_ASSERT_WITH_THROW(bit_to_use != 0); m_internal_data[bit_to_use].push_back(datum); } @@ -230,9 +230,9 @@ construct_and_return_full_table() { // The simplest nontrivial permutation arises from a single swap (a,b), // which under the canonical relabelling is converted to (01), // which has hash 2. - TKET_ASSERT(entry.first >= 2); + TKET_ASSERT_WITH_THROW(entry.first >= 2); // The largest possible hash comes from (01)(23)(45). - TKET_ASSERT(entry.first <= 222); + TKET_ASSERT_WITH_THROW(entry.first <= 222); result[entry.first].initialise(entry.second); } return result; diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp index 891e80770a..2ff95a50d4 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -54,7 +54,7 @@ const ExactMappingLookup::Result& PartialMappingLookup::operator()( // For next_permutation, let's permute the empty SOURCE vertices. // They are already sorted, thus already at the first permutation // in the ordering, because they came from the keys of desired_mapping. 
- TKET_ASSERT(std::next_permutation( + TKET_ASSERT_WITH_THROW(std::next_permutation( m_empty_source_vertices.begin(), m_empty_source_vertices.end())); m_altered_mapping = desired_mapping; diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp index 0c8d8ad3ad..76770b9a32 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -28,7 +28,7 @@ static vector get_swaps_fixed_vector() { swaps.push_back(get_swap(ii, jj)); } } - TKET_ASSERT(swaps.size() == 15); + TKET_ASSERT_WITH_THROW(swaps.size() == 15); return swaps; } @@ -67,8 +67,8 @@ unsigned SwapConversion::get_number_of_swaps( ++num_swaps; const auto swap_hash = swaps_code & 0xF; swaps_code >>= 4; - TKET_ASSERT(swap_hash > 0); - TKET_ASSERT(swap_hash <= 15); + TKET_ASSERT_WITH_THROW(swap_hash > 0); + TKET_ASSERT_WITH_THROW(swap_hash <= 15); } return num_swaps; } @@ -78,7 +78,7 @@ SwapConversion::EdgesBitset SwapConversion::get_edges_bitset( EdgesBitset edges_bitset = 0; while (swaps_code != 0) { const auto swap_hash = swaps_code & 0xF; - TKET_ASSERT(swap_hash > 0); + TKET_ASSERT_WITH_THROW(swap_hash > 0); edges_bitset |= (1u << (swap_hash - 1)); swaps_code >>= 4; } diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index 9fa6286e5a..84672bd153 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -79,11 +79,12 @@ SwapListSegmentOptimiser::optimise_segment( bool should_store = m_output.initial_segment_size == 0; if (!should_store) { // Something IS stored, but is our new solution better? 
- TKET_ASSERT( + TKET_ASSERT_WITH_THROW( m_output.initial_segment_size >= m_best_optimised_swaps.size()); const size_t current_decrease = m_output.initial_segment_size - m_best_optimised_swaps.size(); - TKET_ASSERT(current_number_of_swaps >= lookup_result.swaps.size()); + TKET_ASSERT_WITH_THROW( + current_number_of_swaps >= lookup_result.swaps.size()); const size_t new_decrease = current_number_of_swaps - lookup_result.swaps.size(); should_store = new_decrease > current_decrease; @@ -139,7 +140,8 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( return; } m_output.final_segment_size = m_best_optimised_swaps.size(); - TKET_ASSERT(m_output.final_segment_size <= m_output.initial_segment_size); + TKET_ASSERT_WITH_THROW( + m_output.final_segment_size <= m_output.initial_segment_size); const auto initial_size = swap_list.size(); if (m_best_optimised_swaps.empty()) { @@ -150,7 +152,7 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( initial_id, m_best_optimised_swaps.cbegin(), m_best_optimised_swaps.cend()); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( overwrite_result.number_of_overwritten_elements == m_best_optimised_swaps.size()); m_output.new_segment_last_id = @@ -166,7 +168,7 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( next_id_opt.value(), remaining_elements_to_erase); } } - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( swap_list.size() + m_output.initial_segment_size == initial_size + m_output.final_segment_size); } diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp index 6478942473..16e36023d1 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -69,12 +69,12 @@ static bool erase_empty_swaps_interval( case EmptySwapCheckResult::TERMINATE_AFTER_ERASURE: return false; default: - TKET_ASSERT(!"unknown EmptySwapCheckResult enum"); + TKET_ASSERT_WITH_THROW(!"unknown EmptySwapCheckResult enum"); break; } } // Should never get here! - TKET_ASSERT(!"erase_empty_swaps_interval falied to terminate"); + TKET_ASSERT_WITH_THROW(!"erase_empty_swaps_interval falied to terminate"); return false; } @@ -89,16 +89,16 @@ static bool perform_current_nonempty_swap( if (vertices_with_tokens.count(swap.first) == 0) { // No empty swaps! - TKET_ASSERT(vertices_with_tokens.count(swap.second) != 0); + TKET_ASSERT_WITH_THROW(vertices_with_tokens.count(swap.second) != 0); // Second has a token, first doesn't. - TKET_ASSERT(vertices_with_tokens.insert(swap.first).second); - TKET_ASSERT(vertices_with_tokens.erase(swap.second) == 1); + TKET_ASSERT_WITH_THROW(vertices_with_tokens.insert(swap.first).second); + TKET_ASSERT_WITH_THROW(vertices_with_tokens.erase(swap.second) == 1); } else { // First has a token. if (vertices_with_tokens.count(swap.second) == 0) { // Second has no token. - TKET_ASSERT(vertices_with_tokens.erase(swap.first) == 1); - TKET_ASSERT(vertices_with_tokens.insert(swap.second).second); + TKET_ASSERT_WITH_THROW(vertices_with_tokens.erase(swap.first) == 1); + TKET_ASSERT_WITH_THROW(vertices_with_tokens.insert(swap.second).second); } } @@ -142,7 +142,7 @@ void SwapListTableOptimiser::optimise( break; } } - TKET_ASSERT(terminated_correctly); + TKET_ASSERT_WITH_THROW(terminated_correctly); if (swap_list.size() <= 1) { return; } @@ -163,12 +163,12 @@ void SwapListTableOptimiser::optimise( // Must reverse again to get back to start! 
swap_list.reverse(); const auto new_size = swap_list.size(); - TKET_ASSERT(new_size <= old_size); + TKET_ASSERT_WITH_THROW(new_size <= old_size); if (new_size == old_size) { return; } } - TKET_ASSERT(!"SwapListTableOptimiser::optimise"); + TKET_ASSERT_WITH_THROW(!"SwapListTableOptimiser::optimise"); } void SwapListTableOptimiser::optimise_in_forward_direction( diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp index 7060995b1b..ecb434b566 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -59,9 +59,9 @@ const VertexMapResizing::Result& VertexMapResizing::resize_mapping( return m_result; } } - TKET_ASSERT(!"VertexMapResizing::resize_mapping"); + TKET_ASSERT_WITH_THROW(!"VertexMapResizing::resize_mapping"); } - TKET_ASSERT(mapping.size() <= desired_size); + TKET_ASSERT_WITH_THROW(mapping.size() <= desired_size); bool terminated_correctly = false; for (auto infinite_loop_guard = 1 + desired_size; infinite_loop_guard > 0; --infinite_loop_guard) { @@ -78,9 +78,9 @@ const VertexMapResizing::Result& VertexMapResizing::resize_mapping( break; } // Must have added exactly one vertex. - TKET_ASSERT(old_size + 1 == new_size); + TKET_ASSERT_WITH_THROW(old_size + 1 == new_size); } - TKET_ASSERT(terminated_correctly); + TKET_ASSERT_WITH_THROW(terminated_correctly); // It's acceptable to have too few vertices, // it can still be looked up in the table. m_result.success = true; @@ -148,8 +148,8 @@ void VertexMapResizing::remove_vertex(VertexMapping& mapping) { } } if (minimum_edges_removed < invalid_number_of_edges) { - TKET_ASSERT(mapping.at(best_vertex) == best_vertex); - TKET_ASSERT(mapping.erase(best_vertex) == 1); + TKET_ASSERT_WITH_THROW(mapping.at(best_vertex) == best_vertex); + TKET_ASSERT_WITH_THROW(mapping.erase(best_vertex) == 1); } } diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index 8fa6495f79..d41bd78cec 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -37,10 +37,10 @@ VectorListHybridSkeleton::VectorListHybridSkeleton() void VectorListHybridSkeleton::clear() { if (m_links.empty()) { - TKET_ASSERT(m_size == 0); - TKET_ASSERT(m_front == INVALID_INDEX); - TKET_ASSERT(m_back == INVALID_INDEX); - TKET_ASSERT(m_deleted_front == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_size == 0); + TKET_ASSERT_WITH_THROW(m_front == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_back == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_deleted_front == INVALID_INDEX); return; } m_size = 0; @@ -61,13 +61,13 @@ void VectorListHybridSkeleton::clear() { void VectorListHybridSkeleton::fast_clear() { if (m_back == INVALID_INDEX) { // No elements stored currently; nothing to do. - TKET_ASSERT(m_size == 0); - TKET_ASSERT(m_front == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_size == 0); + TKET_ASSERT_WITH_THROW(m_front == INVALID_INDEX); return; } - TKET_ASSERT(m_size > 0); - TKET_ASSERT(m_front != INVALID_INDEX); - TKET_ASSERT(m_links[m_back].next == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_size > 0); + TKET_ASSERT_WITH_THROW(m_front != INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_links[m_back].next == INVALID_INDEX); // There are some existing elements. // Recall that deleted elements are ONLY a forward list, // so we don't need to update "previous". 
@@ -90,9 +90,9 @@ void VectorListHybridSkeleton::reverse() { // Nothing to do. return; } - TKET_ASSERT(m_front != INVALID_INDEX); - TKET_ASSERT(m_back != INVALID_INDEX); - TKET_ASSERT(m_front != m_back); + TKET_ASSERT_WITH_THROW(m_front != INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_back != INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_front != m_back); // The deleted element links don't need to change. { auto current_index = m_front; @@ -103,13 +103,13 @@ void VectorListHybridSkeleton::reverse() { const auto next_index = link.next; std::swap(link.next, link.previous); if (next_index >= m_links.size()) { - TKET_ASSERT(next_index == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(next_index == INVALID_INDEX); terminated_correctly = true; break; } current_index = next_index; } - TKET_ASSERT(terminated_correctly); + TKET_ASSERT_WITH_THROW(terminated_correctly); } std::swap(m_front, m_back); } @@ -160,16 +160,16 @@ void VectorListHybridSkeleton::erase_interval( Index last_element_index = index; for (size_t nn = 1; nn < number_of_elements; ++nn) { last_element_index = m_links.at(last_element_index).next; - if (last_element_index >= m_links.size()) { - std::stringstream ss; - ss << "VectorListHybridSkeleton::erase_interval with start index " - << index << ", number_of_elements=" << number_of_elements << ", size " - << m_links.size() << ", run out of elements at N=" << nn - << " (got index " << last_element_index << ")"; - throw std::runtime_error(ss.str()); - } + + TKET_ASSERT_WITH_THROW( + last_element_index < m_links.size() || + AssertMessage() + << "VectorListHybridSkeleton::erase_interval with start index " + << index << ", number_of_elements=" << number_of_elements + << ", size " << m_links.size() << ", run out of elements at N=" + << nn << " (got index " << last_element_index << ")"); } - TKET_ASSERT(number_of_elements <= m_size); + TKET_ASSERT_WITH_THROW(number_of_elements <= m_size); m_size -= number_of_elements; // Now, splice the soon-to-be-logically-erased interval into the deleted @@ -188,14 +188,14 @@ void VectorListHybridSkeleton::erase_interval( if (index_of_node_before_interval < m_links.size()) { // There IS a previous node to be dealt with. auto& next_node_index_ref = m_links[index_of_node_before_interval].next; - TKET_ASSERT(next_node_index_ref == index); + TKET_ASSERT_WITH_THROW(next_node_index_ref == index); // This is correct even if index_of_node_after_interval is INVALID_INDEX. next_node_index_ref = index_of_node_after_interval; - TKET_ASSERT(m_front != index); + TKET_ASSERT_WITH_THROW(m_front != index); } else { // No previous node, we must have been at the start already. - TKET_ASSERT(index_of_node_before_interval == INVALID_INDEX); - TKET_ASSERT(m_front == index); + TKET_ASSERT_WITH_THROW(index_of_node_before_interval == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_front == index); m_front = index_of_node_after_interval; } // Link the node AFTER the interval to the new previous node. @@ -203,24 +203,24 @@ void VectorListHybridSkeleton::erase_interval( // There are more unerased elements after the interval, // so the first one must be dealt with. auto& prev_node_index = m_links[index_of_node_after_interval].previous; - TKET_ASSERT(prev_node_index == last_element_index); + TKET_ASSERT_WITH_THROW(prev_node_index == last_element_index); // Correct even if there IS no node before the interval. 
prev_node_index = index_of_node_before_interval; - TKET_ASSERT(m_back != last_element_index); + TKET_ASSERT_WITH_THROW(m_back != last_element_index); } else { // No node after, we have erased up to the back. - TKET_ASSERT(index_of_node_after_interval == INVALID_INDEX); - TKET_ASSERT(m_back == last_element_index); + TKET_ASSERT_WITH_THROW(index_of_node_after_interval == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_back == last_element_index); m_back = index_of_node_before_interval; } if (m_size == 0) { - TKET_ASSERT(m_front == INVALID_INDEX); - TKET_ASSERT(m_back == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_front == INVALID_INDEX); + TKET_ASSERT_WITH_THROW(m_back == INVALID_INDEX); } else { - TKET_ASSERT(m_front < m_links.size()); - TKET_ASSERT(m_back < m_links.size()); + TKET_ASSERT_WITH_THROW(m_front < m_links.size()); + TKET_ASSERT_WITH_THROW(m_back < m_links.size()); if (m_size == 1) { - TKET_ASSERT(m_front == m_back); + TKET_ASSERT_WITH_THROW(m_front == m_back); } } } diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp index 3b3f4a9bce..f09e2b9fae 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -472,13 +472,13 @@ OverwriteIntervalResult VectorListHybrid::overwrite_interval( OverwriteIntervalResult result; result.final_overwritten_element_id = id; CIter citer = new_elements_cbegin; - TKET_ASSERT(citer != new_elements_cend); + TKET_ASSERT_WITH_THROW(citer != new_elements_cend); const auto max_number_of_elements = m_links_data.size(); result.number_of_overwritten_elements = 0; for (;;) { m_data.at(result.final_overwritten_element_id) = *citer; ++result.number_of_overwritten_elements; - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( result.number_of_overwritten_elements <= max_number_of_elements); ++citer; if (citer == new_elements_cend) { @@ -489,7 +489,7 @@ OverwriteIntervalResult VectorListHybrid::overwrite_interval( m_links_data.next(result.final_overwritten_element_id); } // Should be impossible to reach here - TKET_ASSERT(!"VectorListHybrid::overwrite_interval"); + TKET_ASSERT_WITH_THROW(!"VectorListHybrid::overwrite_interval"); return result; } diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp index 0489629e20..2ed9ba9e59 100644 --- a/tket/src/TokenSwapping/main_entry_functions.cpp +++ b/tket/src/TokenSwapping/main_entry_functions.cpp @@ -47,7 +47,7 @@ std::vector> get_swaps( vertex_mapping[arch_mapping.get_vertex(node_entry.first)] = arch_mapping.get_vertex(node_entry.second); } - TKET_ASSERT(vertex_mapping.size() == node_mapping.size()); + TKET_ASSERT_WITH_THROW(vertex_mapping.size() == node_mapping.size()); check_mapping(vertex_mapping); SwapList raw_swap_list; @@ -66,6 +66,8 @@ std::vector> get_swaps( return swaps; } +// TODO: we really should add tests for this! 
+// GCOVR_EXCL_START std::tuple get_swaps( const Architecture& architecture, const unit_map_t& initial_logical_to_physical_map, @@ -139,5 +141,6 @@ std::tuple get_swaps( } return result; } +// GCOVR_EXCL_STOP } // namespace tket From 8fbe2f4689afbb6f10ead1e0f37cdbc11c0d285e Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Thu, 3 Feb 2022 20:02:37 +0000 Subject: [PATCH 035/146] Replace TKET_ASSERT with throw and use GCOVR_EXCL_START,STOP where appropriate --- tket/src/Graphs/BruteForceColouring.cpp | 18 ++++++++++-------- tket/src/Graphs/ColouringPriority.cpp | 8 ++++++++ tket/src/Graphs/GraphColouring.cpp | 9 +++++++++ .../include/TokenSwapping/GeneralFunctions.hpp | 2 ++ tket/src/Utils/AssertMessage.cpp | 2 ++ tket/src/Utils/include/Utils/AssertMessage.hpp | 2 ++ 6 files changed, 33 insertions(+), 8 deletions(-) diff --git a/tket/src/Graphs/BruteForceColouring.cpp b/tket/src/Graphs/BruteForceColouring.cpp index 85805856be..e5a1891407 100644 --- a/tket/src/Graphs/BruteForceColouring.cpp +++ b/tket/src/Graphs/BruteForceColouring.cpp @@ -91,7 +91,7 @@ struct BruteForceColouring::Impl { // just because CURRENTLY a vertex has only one colour, // that it will ALWAYS be that way! if (initial_clique.count(nodes[node_index].vertex) != 0) { - TKET_ASSERT(earlier_colours.size() == 1); + TKET_ASSERT_WITH_THROW(earlier_colours.size() == 1); forbidden_colours.insert(earlier_colours[0]); } } @@ -215,13 +215,15 @@ BruteForceColouring::BruteForceColouring( } throw std::runtime_error("suggested_number_of_colours hit number_of_nodes"); } catch (const std::exception& e) { - TKET_ASSERT( - AssertMessage() << "initial_suggested_number_of_colours = " - << initial_suggested_number_of_colours - << ", reached suggested_number_of_colours = " - << suggested_number_of_colours << ", had " - << number_of_nodes << " nodes. Error: " << e.what() - << priority.print_raw_data()); + // GCOVR_EXCL_START + std::stringstream ss; + ss << "initial_suggested_number_of_colours = " + << initial_suggested_number_of_colours + << ", reached suggested_number_of_colours = " + << suggested_number_of_colours << ", had " << number_of_nodes + << " nodes. Error: " << e.what() << priority.print_raw_data(); + throw std::runtime_error(ss.str()); + // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/ColouringPriority.cpp b/tket/src/Graphs/ColouringPriority.cpp index 2dcb98cc9d..4dd9a1ab50 100644 --- a/tket/src/Graphs/ColouringPriority.cpp +++ b/tket/src/Graphs/ColouringPriority.cpp @@ -37,12 +37,14 @@ static void fill_initial_node_sequence( try { for (size_t clique_vertex : initial_clique) { + // GCOVR_EXCL_START if (vertices_in_component.count(clique_vertex) == 0) { std::stringstream ss; ss << "initial clique vertex " << clique_vertex << " is not in this component"; throw std::runtime_error(ss.str()); } + // GCOVR_EXCL_STOP nodes.emplace_back(); nodes.back().vertex = clique_vertex; } @@ -76,12 +78,15 @@ static void fill_initial_node_sequence( vertices_to_add.clear(); current_nodes_begin = current_nodes_end; } + // GCOVR_EXCL_START if (nodes.size() != vertices_in_component.size()) { throw std::runtime_error( "Final size check: number of filled " "nodes does not match number of vertices in this component"); } + // GCOVR_EXCL_STOP } catch (const std::exception& e) { + // GCOVR_EXCL_START std::stringstream ss; ss << "ColouringPriority: fill_initial_node_sequence: initial" << " clique size " << initial_clique.size() << ", " @@ -91,6 +96,7 @@ static void fill_initial_node_sequence( << " So far, filled " << nodes.size() << " nodes." 
<< " Error: " << e.what(); throw std::runtime_error(ss.str()); + // GCOVR_EXCL_STOP } } @@ -116,6 +122,7 @@ const ColouringPriority::Nodes& ColouringPriority::get_nodes() const { } string ColouringPriority::print_raw_data(bool relabel_to_simplify) const { + // GCOVR_EXCL_START map old_vertex_to_new_vertex; if (relabel_to_simplify) { for (size_t i = 0; i < m_nodes.size(); ++i) { @@ -167,6 +174,7 @@ string ColouringPriority::print_raw_data(bool relabel_to_simplify) const { } ss << "\n};\n\n"; return ss.str(); + // GCOVR_EXCL_STOP } ColouringPriority::ColouringPriority( diff --git a/tket/src/Graphs/GraphColouring.cpp b/tket/src/Graphs/GraphColouring.cpp index 2e88ec99bd..828009dc40 100644 --- a/tket/src/Graphs/GraphColouring.cpp +++ b/tket/src/Graphs/GraphColouring.cpp @@ -24,6 +24,7 @@ #include "ColouringPriority.hpp" #include "GraphRoutines.hpp" #include "LargeCliquesResult.hpp" +#include "Utils/Assert.hpp" using std::exception; using std::map; @@ -80,6 +81,7 @@ static void colour_single_component( const auto& colour = entry.second; result.number_of_colours = std::max(result.number_of_colours, colour + 1); + // GCOVR_EXCL_START try { if (vertex >= result.colours.size()) { throw runtime_error("illegal vertex index"); @@ -98,6 +100,7 @@ static void colour_single_component( << e.what(); throw runtime_error(ss.str()); } + // GCOVR_EXCL_STOP } } @@ -109,11 +112,13 @@ static void check_final_colouring(GraphColouringResult& result) { result.number_of_colours = 0; for (std::size_t i = 0; i < result.colours.size(); ++i) { const auto colour = result.colours[i]; + // GCOVR_EXCL_START if (colour >= result.colours.size()) { stringstream ss; ss << "vertex " << i << " has unassigned or illegal colour " << colour; throw runtime_error(ss.str()); } + // GCOVR_EXCL_STOP result.number_of_colours = std::max(result.number_of_colours, colour + 1); } } @@ -130,12 +135,14 @@ GraphColouringResult GraphColouringRoutines::get_colouring( const LargeCliquesResult cliques_in_this_component( adjacency_data, connected_components[i]); + // GCOVR_EXCL_START if (cliques_in_this_component.cliques.empty()) { stringstream ss; ss << "component " << i << " has " << connected_components[i].size() << " vertices, but couldn't find a clique!"; throw runtime_error(ss.str()); } + // GCOVR_EXCL_STOP cliques[i] = cliques_in_this_component.cliques[0]; component_indices[i] = i; } @@ -163,12 +170,14 @@ GraphColouringResult GraphColouringRoutines::get_colouring( check_final_colouring(result); return result; } catch (const exception& e) { + // GCOVR_EXCL_START stringstream ss; ss << "GraphColouringRoutines::get_colouring: we had " << connected_components.size() << " connected components, " << adjacency_data.get_number_of_vertices() << " vertices in total: " << e.what(); throw runtime_error(ss.str()); + // GCOVR_EXCL_STOP } } diff --git a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp index dd3ab9dff4..5f36a4f7d8 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp @@ -50,6 +50,7 @@ std::optional get_optional_value(const std::map& map, const K& key) { */ template std::map get_reversed_map(const std::map& map) { + // GCOVR_EXCL_START std::map reversed_map; for (const auto& entry : map) { reversed_map[entry.second] = entry.first; @@ -58,6 +59,7 @@ std::map get_reversed_map(const std::map& map) { throw std::runtime_error("get_reversed_map called with non-reversible 
map"); } return reversed_map; + // GCOVR_EXCL_STOP } /** Finds the rightmost "one" (least significant bit) diff --git a/tket/src/Utils/AssertMessage.cpp b/tket/src/Utils/AssertMessage.cpp index 6746810018..d4a497c554 100644 --- a/tket/src/Utils/AssertMessage.cpp +++ b/tket/src/Utils/AssertMessage.cpp @@ -16,6 +16,7 @@ namespace tket { +// GCOVR_EXCL_START AssertMessage::AssertMessage() : m_verbose(false) {} AssertMessage AssertMessage::verbose() { @@ -30,5 +31,6 @@ AssertMessage::MessageData::MessageData(const std::string& str, bool vbose) AssertMessage::operator bool() const { throw MessageData(m_ss.str(), m_verbose); } +// GCOVR_EXCL_STOP } // namespace tket diff --git a/tket/src/Utils/include/Utils/AssertMessage.hpp b/tket/src/Utils/include/Utils/AssertMessage.hpp index 1bc40d46e6..8cf4a764c6 100644 --- a/tket/src/Utils/include/Utils/AssertMessage.hpp +++ b/tket/src/Utils/include/Utils/AssertMessage.hpp @@ -41,12 +41,14 @@ class AssertMessage { /** Throws a MessageData object when called, with the message. */ operator bool() const; + // GCOVR_EXCL_START /** Every streamable object can be written to the stream. */ template AssertMessage& operator<<(const T& x) { m_ss << x; return *this; } + // GCOVR_EXCL_STOP private: bool m_verbose; From 719498abc44beacaa3a5d040444076368e9f6a88 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Thu, 3 Feb 2022 20:07:12 +0000 Subject: [PATCH 036/146] replace throws with TKET_ASSERT_WITH_THROW where appropriate --- .../TokenSwapping/CyclesCandidateManager.cpp | 19 ++++------ .../src/TokenSwapping/CyclesGrowthManager.cpp | 17 ++++----- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 36 +++++++++---------- 3 files changed, 30 insertions(+), 42 deletions(-) diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp index 6ce7852929..f4abb692c3 100644 --- a/tket/src/TokenSwapping/CyclesCandidateManager.cpp +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -18,6 +18,7 @@ #include #include +#include "Utils/Assert.hpp" #include "VertexSwapResult.hpp" using std::vector; @@ -40,17 +41,12 @@ size_t CyclesCandidateManager::fill_initial_cycle_ids(const Cycles& cycles) { if (cycle_length == 0) { cycle_length = vertices.size(); - if (cycle_length < 2) { - throw std::runtime_error("Cycles too small"); - } + TKET_ASSERT_WITH_THROW(cycle_length >= 2); } else { - if (cycle_length != vertices.size()) { - throw std::runtime_error("Differing cycle sizes"); - } - } - if (cycle.decrease <= 0) { - throw std::runtime_error("Bad candidates stored"); + TKET_ASSERT_WITH_THROW(cycle_length == vertices.size()); } + TKET_ASSERT_WITH_THROW(cycle.decrease > 0); + // We want 50*(decrease)/(num swaps) >= min_candidate_power_percentage. // (We multiply by 50 because a swap can change L by 2, not 1). if (50 * static_cast(cycle.decrease) < @@ -108,9 +104,8 @@ void CyclesCandidateManager::discard_lower_power_solutions( for (auto id : m_cycles_to_keep) { highest_decrease = std::max(highest_decrease, cycles.at(id).decrease); } - if (highest_decrease <= 0) { - throw std::runtime_error("No good candidate cycles"); - } + TKET_ASSERT_WITH_THROW(highest_decrease > 0); + for (size_t ii = 0; ii < m_cycles_to_keep.size();) { if (cycles.at(m_cycles_to_keep[ii]).decrease < highest_decrease) { // This cycle is not good enough. 
diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp index b27dd92798..efb794520e 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.cpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -17,6 +17,7 @@ #include #include "TokenSwapping/DistanceFunctions.hpp" +#include "Utils/Assert.hpp" using std::vector; @@ -38,9 +39,8 @@ CyclesGrowthManager::Options& CyclesGrowthManager::get_options() { const Cycles& CyclesGrowthManager::get_cycles( bool throw_if_cycles_are_not_candidates) const { - if (throw_if_cycles_are_not_candidates && !m_cycles_are_candidates) { - throw std::runtime_error("get_cycles called with non-candidate cycles"); - } + TKET_ASSERT_WITH_THROW( + !(throw_if_cycles_are_not_candidates && !m_cycles_are_candidates)); return m_cycles; } @@ -85,11 +85,7 @@ bool CyclesGrowthManager::reset( bool CyclesGrowthManager::attempt_to_close_cycles( const VertexMapping& vertex_mapping, DistancesInterface& distances) { - if (m_cycles_are_candidates) { - throw std::runtime_error( - "Calling attempt_to_close_cycles when we already have " - "candidates"); - } + TKET_ASSERT_WITH_THROW(!m_cycles_are_candidates); for (auto id_opt = m_cycles.front_id(); id_opt;) { const auto id = id_opt.value(); id_opt = m_cycles.next(id); @@ -123,9 +119,8 @@ CyclesGrowthManager::GrowthResult CyclesGrowthManager::attempt_to_grow( NeighboursInterface& neighbours) { GrowthResult result; - if (m_cycles.empty()) { - throw std::runtime_error("Calling attempt_to_grow with no cycles stored"); - } + TKET_ASSERT_WITH_THROW(!m_cycles.empty()); + if (m_cycles.front().vertices.size() >= m_options.max_cycle_size) { m_cycles.clear(); result.hit_cycle_length_limit = true; diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index 481f808355..a2695ea227 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -81,8 +81,8 @@ void RiverFlowPathFinder::Impl::reset() { void RiverFlowPathFinder::Impl::grow_path( size_t target_vertex, size_t required_path_size) { - TKET_ASSERT(path.size() < required_path_size); - TKET_ASSERT(!path.empty()); + TKET_ASSERT_WITH_THROW(path.size() < required_path_size); + TKET_ASSERT_WITH_THROW(!path.empty()); // We don't yet know how to move on, so we must choose a neighbour. // All candidates will have the same edge count. @@ -114,23 +114,21 @@ void RiverFlowPathFinder::Impl::grow_path( candidate_moves.back().count = edge_count; continue; } - if (neighbour_distance_to_target != remaining_distance && - neighbour_distance_to_target != remaining_distance + 1) { - std::stringstream ss; - ss << "d(v_" << path.back() << ", v_" << target_vertex - << ")=" << remaining_distance << ". But v_" << path.back() - << " has neighbour v_" << neighbour << ", at distance " - << neighbour_distance_to_target << " to the target v_" - << target_vertex; - throw std::runtime_error(ss.str()); - } - } - if (candidate_moves.empty()) { - std::stringstream ss; - ss << "No neighbours of v_" << path.back() << " at correct distance " - << remaining_distance - 1 << " to target vertex v_" << target_vertex; - throw std::runtime_error(ss.str()); + TKET_ASSERT_WITH_THROW( + neighbour_distance_to_target == remaining_distance || + neighbour_distance_to_target == remaining_distance + 1 || + AssertMessage() << "d(v_" << path.back() << ", v_" << target_vertex + << ")=" << remaining_distance << ". 
But v_" + << path.back() << " has neighbour v_" << neighbour + << ", at distance " << neighbour_distance_to_target + << " to the target v_" << target_vertex); } + TKET_ASSERT_WITH_THROW( + !candidate_moves.empty() || + AssertMessage() << "No neighbours of v_" << path.back() + << " at correct distance " << remaining_distance - 1 + << " to target vertex v_" << target_vertex); + const auto& choice = rng.get_element(candidate_moves); path.push_back(choice.end_vertex); } @@ -172,7 +170,7 @@ const vector& RiverFlowPathFinder::operator()( infinite_loop_guard != 0; --infinite_loop_guard) { m_pimpl->grow_path(vertex2, final_path_size); if (m_pimpl->path.size() == final_path_size) { - TKET_ASSERT(m_pimpl->path.back() == vertex2); + TKET_ASSERT_WITH_THROW(m_pimpl->path.back() == vertex2); m_pimpl->update_data_with_path(); return m_pimpl->path; } From fd09f426cadb1e9b8f339669c548d456ed8d77fa Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Thu, 3 Feb 2022 20:09:18 +0000 Subject: [PATCH 037/146] Add the TKET_ASSERT_WITH_THROW macro --- tket/src/TokenSwapping/TrivialTSA.cpp | 46 +++++++++++++------------ tket/src/Utils/include/Utils/Assert.hpp | 42 ++++++++++++++++++++++ 2 files changed, 66 insertions(+), 22 deletions(-) diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp index faf01a58a1..a76e47f29d 100644 --- a/tket/src/TokenSwapping/TrivialTSA.cpp +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -69,9 +69,9 @@ bool TrivialTSA::grow_cycle_forwards( current_id = m_abstract_cycles_vertices.insert_after(current_id); m_abstract_cycles_vertices.at(current_id) = citer->second; } - throw std::runtime_error( - "TrivialTSA::grow_cycle_forwards: " + TKET_ASSERT_WITH_THROW(!"TrivialTSA::grow_cycle_forwards: " "hit vertex count limit; invalid vertex mapping"); + return false; } void TrivialTSA::grow_cycle_backwards(Endpoints& endpoints) { @@ -93,8 +93,7 @@ void TrivialTSA::grow_cycle_backwards(Endpoints& endpoints) { current_id = m_abstract_cycles_vertices.insert_before(current_id); m_abstract_cycles_vertices.at(current_id) = citer->second; } - throw std::runtime_error( - "TrivialTSA::grow_cycle_backwards: " + TKET_ASSERT_WITH_THROW(!"TrivialTSA::grow_cycle_backwards: " "hit vertex count limit; invalid vertex mapping"); } @@ -104,20 +103,21 @@ void TrivialTSA::do_final_checks() const { m_vertices_seen.insert(entry.first); m_vertices_seen.insert(entry.second); } - TKET_ASSERT(m_vertices_seen.size() == m_abstract_cycles_vertices.size()); + TKET_ASSERT_WITH_THROW( + m_vertices_seen.size() == m_abstract_cycles_vertices.size()); // Erase them again...! for (const auto& endpoints : m_cycle_endpoints) { for (auto id = endpoints.first;; id = m_abstract_cycles_vertices.next(id).value()) { - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( m_vertices_seen.erase(m_abstract_cycles_vertices.at(id)) == 1); if (id == endpoints.second) { break; } } } - TKET_ASSERT(m_vertices_seen.empty()); + TKET_ASSERT_WITH_THROW(m_vertices_seen.empty()); } void TrivialTSA::fill_disjoint_abstract_cycles( @@ -143,7 +143,7 @@ void TrivialTSA::fill_disjoint_abstract_cycles( // Now, add the vertices to vertices seen... 
for (auto id = endpoints.first;; id = m_abstract_cycles_vertices.next(id).value()) { - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( m_vertices_seen.insert(m_abstract_cycles_vertices.at(id)).second); if (id == endpoints.second) { break; @@ -178,7 +178,7 @@ void TrivialTSA::append_partial_solution( append_partial_solution_with_all_cycles(swaps, vertex_mapping, path_finder); return; } - TKET_ASSERT(m_options == Options::BREAK_AFTER_PROGRESS); + TKET_ASSERT_WITH_THROW(m_options == Options::BREAK_AFTER_PROGRESS); // We're only going to do ONE cycle; so find which cycle // has the shortest estimated number of swaps size_t best_estimated_concrete_swaps = std::numeric_limits::max(); @@ -188,27 +188,28 @@ void TrivialTSA::append_partial_solution( for (const auto& endpoints : m_cycle_endpoints) { copy_vertices_to_work_vector(endpoints); if (m_vertices_work_vector.size() < 2) { - TKET_ASSERT(m_vertices_work_vector.size() == 1); + TKET_ASSERT_WITH_THROW(m_vertices_work_vector.size() == 1); continue; } const CyclicShiftCostEstimate estimate(m_vertices_work_vector, distances); - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( estimate.estimated_concrete_swaps < std::numeric_limits::max()); - TKET_ASSERT(estimate.start_v_index < m_vertices_work_vector.size()); + TKET_ASSERT_WITH_THROW( + estimate.start_v_index < m_vertices_work_vector.size()); if (estimate.estimated_concrete_swaps < best_estimated_concrete_swaps) { best_estimated_concrete_swaps = estimate.estimated_concrete_swaps; start_v_index = estimate.start_v_index; best_endpoints = endpoints; } } - TKET_ASSERT( + TKET_ASSERT_WITH_THROW( best_estimated_concrete_swaps < std::numeric_limits::max()); const auto swap_size_before = swaps.size(); const auto decrease = append_partial_solution_with_single_cycle( best_endpoints, start_v_index, swaps, vertex_mapping, distances, path_finder); - TKET_ASSERT(swap_size_before < swaps.size()); - TKET_ASSERT(decrease > 0); + TKET_ASSERT_WITH_THROW(swap_size_before < swaps.size()); + TKET_ASSERT_WITH_THROW(decrease > 0); } void TrivialTSA::copy_vertices_to_work_vector(const Endpoints& endpoints) { @@ -237,9 +238,9 @@ void TrivialTSA::append_partial_solution_with_all_cycles( // Abstract swap(v1, v2). const auto v1 = m_vertices_work_vector[ii]; const auto v2 = m_vertices_work_vector[ii - 1]; - TKET_ASSERT(v1 != v2); + TKET_ASSERT_WITH_THROW(v1 != v2); const auto& path = path_finder(v1, v2); - TKET_ASSERT(path.size() >= 2); + TKET_ASSERT_WITH_THROW(path.size() >= 2); append_swaps_to_interchange_path_ends(path, vertex_mapping, swaps); } } @@ -250,8 +251,8 @@ size_t TrivialTSA::append_partial_solution_with_single_cycle( VertexMapping& vertex_mapping, DistancesInterface& distances, PathFinderInterface& path_finder) { copy_vertices_to_work_vector(endpoints); - TKET_ASSERT(m_vertices_work_vector.size() >= 2); - TKET_ASSERT(start_v_index < m_vertices_work_vector.size()); + TKET_ASSERT_WITH_THROW(m_vertices_work_vector.size() >= 2); + TKET_ASSERT_WITH_THROW(start_v_index < m_vertices_work_vector.size()); // Can go negative! But MUST be >= 1 at the end // (otherwise this cycle was useless and should never have occurred). 
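A minimal usage sketch of the assertion pattern adopted in these hunks; TKET_ASSERT_WITH_THROW and AssertMessage are defined in "Utils/Assert.hpp" (see the Assert.hpp hunks later in this patch), while the function and variable names below are illustrative only:

    #include "Utils/Assert.hpp"

    // Hypothetical helper, purely for illustration; not a tket function.
    void check_token_count(unsigned tokens, unsigned limit) {
      // Plain form: on failure, throws std::runtime_error naming the
      // condition, file, function and line.
      TKET_ASSERT_WITH_THROW(tokens <= limit);

      // Streamed form: when the first operand is false, the AssertMessage()
      // expression supplies extra detail, which the macro includes in the
      // thrown message.
      TKET_ASSERT_WITH_THROW(
          tokens <= limit ||
          AssertMessage() << "too many tokens: " << tokens << " > " << limit);
    }
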
@@ -267,9 +268,9 @@ size_t TrivialTSA::append_partial_solution_with_single_cycle( const auto v2 = m_vertices_work_vector [((ii - 1) + start_v_index) % m_vertices_work_vector.size()]; - TKET_ASSERT(v1 != v2); + TKET_ASSERT_WITH_THROW(v1 != v2); const auto& path = path_finder(v1, v2); - TKET_ASSERT(path.size() >= 2); + TKET_ASSERT_WITH_THROW(path.size() >= 2); // e.g., to swap endpoints: [x,a,b,c,y] -> [y,a,b,c,x], // do concrete swaps xa ab bc cy bc ab xa. @@ -297,7 +298,8 @@ size_t TrivialTSA::append_partial_solution_with_single_cycle( } // The cycle MUST have decreased L overall, // otherwise we shouldn't have done it. - TKET_ASSERT(!"TrivialTSA::append_partial_solution_with_single_cycle"); + TKET_ASSERT_WITH_THROW( + !"TrivialTSA::append_partial_solution_with_single_cycle"); return 0; } diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index 40a2282f61..94f293fd4e 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -79,3 +79,45 @@ std::abort(); \ } \ } while (0) /* GCOVR_EXCL_STOP */ + +/** Like TKET_ASSERT, but throws an exception instead of aborting + * if the condition is not satisfied. + */ +#define TKET_ASSERT_WITH_THROW(b) \ + /* GCOVR_EXCL_START */ \ + do { \ + bool intended_exception_with_message = false; \ + try { \ + if (!(b)) { \ + std::stringstream msg; \ + msg << "Assertion '" << #b << "' (" << __FILE__ << " : " << __func__ \ + << " : " << __LINE__ << ") failed."; \ + intended_exception_with_message = true; \ + throw std::runtime_error(msg.str()); \ + } \ + } catch (const AssertMessage::MessageData& e1) { \ + std::stringstream msg; \ + msg << "Assertion "; \ + if (e1.verbose) { \ + msg << "'" << #b << "' "; \ + } \ + msg << "(" << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") failed: '" << e1.what() << "'"; \ + throw std::runtime_error(msg.str()); \ + } catch (const std::exception& e2) { \ + if (intended_exception_with_message) { \ + throw std::runtime_error(e2.what()); \ + } \ + std::stringstream msg; \ + msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ + << " : " << __func__ << " : " << __LINE__ \ + << ") threw unexpected exception: '" << e2.what() << "'"; \ + throw std::runtime_error(msg.str()); \ + } catch (...) { \ + std::stringstream msg; \ + msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ + << " : " << __func__ << " : " << __LINE__ \ + << ") threw unknown exception."; \ + throw std::runtime_error(msg.str()); \ + } \ + } while (0) /* GCOVR_EXCL_STOP */ From fc6bbe8accc7c01eeec2f0827c6fba06b692878f Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Thu, 3 Feb 2022 20:11:23 +0000 Subject: [PATCH 038/146] add simple swap functions tests --- .../TSAUtils/test_SwapFunctions.cpp | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 tket/tests/TokenSwapping/TSAUtils/test_SwapFunctions.cpp diff --git a/tket/tests/TokenSwapping/TSAUtils/test_SwapFunctions.cpp b/tket/tests/TokenSwapping/TSAUtils/test_SwapFunctions.cpp new file mode 100644 index 0000000000..f4666f372f --- /dev/null +++ b/tket/tests/TokenSwapping/TSAUtils/test_SwapFunctions.cpp @@ -0,0 +1,89 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "TokenSwapping/SwapFunctions.hpp" + +using Catch::Matchers::Contains; + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Get swaps, with exceptions") { + for (size_t ii = 0; ii < 5; ++ii) { + for (size_t jj = 0; jj < 5; ++jj) { + try { + const auto swap = get_swap(ii, jj); + CHECK(ii != jj); + CHECK(swap.first == std::min(ii, jj)); + CHECK(swap.second == std::max(ii, jj)); + } catch (const std::exception& e) { + CHECK(ii == jj); + CHECK_THAT(std::string(e.what()), Contains("equal vertices")); + } + } + } +} + +SCENARIO("Disjoint swaps") { + std::vector swaps; + for (size_t ii = 0; ii < 5; ++ii) { + for (size_t jj = ii + 1; jj < 5; ++jj) { + swaps.push_back(get_swap(ii, jj)); + } + } + std::stringstream disjoint_pairs; + std::stringstream non_disjoint_pairs; + for (const auto& swap1 : swaps) { + for (const auto& swap2 : swaps) { + auto& ss = disjoint(swap1, swap2) ? disjoint_pairs : non_disjoint_pairs; + ss << "[" << swap1.first << swap1.second << " " << swap2.first + << swap2.second << "] "; + } + } + CHECK( + disjoint_pairs.str() == + "[01 23] [01 24] [01 34] [02 13] [02 14] [02 34] [03 12] [03 14] [03 24] " + "[04 " + "12] [04 13] [04 23] [12 03] [12 04] [12 34] [13 02] [13 04] [13 24] [14 " + "02] " + "[14 03] [14 23] [23 01] [23 04] [23 14] [24 01] [24 03] [24 13] [34 01] " + "[34 " + "02] [34 12] "); + CHECK( + non_disjoint_pairs.str() == + "[01 01] [01 02] [01 03] [01 04] [01 12] [01 13] [01 14] [02 01] [02 02] " + "[02 " + "03] [02 04] [02 12] [02 23] [02 24] [03 01] [03 02] [03 03] [03 04] [03 " + "13] " + "[03 23] [03 34] [04 01] [04 02] [04 03] [04 04] [04 14] [04 24] [04 34] " + "[12 " + "01] [12 02] [12 12] [12 13] [12 14] [12 23] [12 24] [13 01] [13 03] [13 " + "12] " + "[13 13] [13 14] [13 23] [13 34] [14 01] [14 04] [14 12] [14 13] [14 14] " + "[14 " + "24] [14 34] [23 02] [23 03] [23 12] [23 13] [23 23] [23 24] [23 34] [24 " + "02] " + "[24 04] [24 12] [24 14] [24 23] [24 24] [24 34] [34 03] [34 04] [34 13] " + "[34 " + "14] [34 23] [34 24] [34 34] "); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket From 3e4ee2080249d9e9ad3eeb12e1c21503e345bbe3 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Thu, 3 Feb 2022 20:13:15 +0000 Subject: [PATCH 039/146] add TKET_ASSERT_WITH_THROW tests, for detailed error messages --- tket/tests/Utils/test_TketAssertWithThrow.cpp | 327 ++++++++++++++++++ tket/tests/tkettestsfiles.cmake | 2 + 2 files changed, 329 insertions(+) create mode 100644 tket/tests/Utils/test_TketAssertWithThrow.cpp diff --git a/tket/tests/Utils/test_TketAssertWithThrow.cpp b/tket/tests/Utils/test_TketAssertWithThrow.cpp new file mode 100644 index 0000000000..a45276d269 --- /dev/null +++ b/tket/tests/Utils/test_TketAssertWithThrow.cpp @@ -0,0 +1,327 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "Utils/Assert.hpp" + +using Catch::Matchers::Contains; + +// An assert function with abort obviously cannot be tested here; +// but we CAN test assert functions which only throw. +namespace tket { +namespace { +// Just ensure that we have checked every message. +class MessageChecker { + public: + explicit MessageChecker(const std::vector& calc_messages) + : m_ii_count(0), m_calc_messages(calc_messages) {} + + const std::string& get_message(int ii) { + ++m_ii_count; + m_values_of_ii_checked.insert(ii); + return m_calc_messages.at(ii); + } + + void final_checks() const { + CHECK(m_values_of_ii_checked.size() == m_calc_messages.size()); + // the ii should be [0,1,2,...,m]. + CHECK(m_values_of_ii_checked.size() == m_ii_count); + CHECK(*m_values_of_ii_checked.cbegin() == 0); + CHECK( + *m_values_of_ii_checked.crbegin() == m_values_of_ii_checked.size() - 1); + } + + private: + unsigned m_ii_count; + const std::vector& m_calc_messages; + std::set m_values_of_ii_checked; +}; + +} // namespace + +static void check_filename_is_included( + const std::vector& messages) { + for (const auto& message : messages) { + CHECK_THAT(message, Contains("test_TketAssertWithThrow.cpp")); + } +} + +static int get_number(int nn) { + if (nn > 15) { + throw std::runtime_error("Error!!"); + } + return nn - 10; +} + +SCENARIO("Simple asserts with throws") { + std::vector calc_messages; + std::vector values_of_nn_with_error; + + for (int nn = 0; nn <= 20; ++nn) { + try { + // Should throw for nn in [3,5] + TKET_ASSERT_WITH_THROW((nn - 3) * (nn - 5) > 0); + + // Should throw for nn in [8,10] + TKET_ASSERT_WITH_THROW( + (nn - 8) * (nn - 10) > 0 || AssertMessage() << "N=" << nn); + + // Should throw for [16,20] (the function throws). 
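+      // (get_number(nn) throws for nn > 15; the macro catches this and
+      // reports it as "threw unexpected exception: 'Error!!'", which the
+      // checks below look for.)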
+ TKET_ASSERT_WITH_THROW(get_number(nn) < 20); + } catch (const std::exception& e) { + values_of_nn_with_error.push_back(nn); + std::stringstream ss; + ss << "CHECK: nn=" << nn << " ; " << e.what(); + calc_messages.emplace_back(ss.str()); + } + } + + CHECK(calc_messages.size() == 11); + check_filename_is_included(calc_messages); + + MessageChecker checker(calc_messages); + + for (int ii = 0; ii <= 2; ++ii) { + const auto& message = checker.get_message(ii); + CHECK_THAT( + message, + Contains(std::string("CHECK: nn=") + std::to_string(ii + 3) + " ; ")); + CHECK_THAT(message, Contains("Assertion '(nn - 3) * (nn - 5) > 0'")); + } + for (int ii = 3; ii <= 5; ++ii) { + const auto& message = checker.get_message(ii); + const std::string n_value = std::to_string(ii + 5); + CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); + CHECK_THAT(message, Contains("Assertion")); + CHECK_THAT(message, Contains("failed:")); + CHECK_THAT(message, Contains(std::string("'N=") + n_value + "'")); + } + for (int ii = 6; ii <= 10; ++ii) { + const auto& message = checker.get_message(ii); + const std::string n_value = std::to_string(ii + 10); + CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); + CHECK_THAT( + message, + Contains("Evaluating assertion condition 'get_number(nn) < 20'")); + CHECK_THAT(message, Contains("threw unexpected exception: 'Error!!'")); + } + CHECK( + values_of_nn_with_error == + std::vector{3, 4, 5, 8, 9, 10, 16, 17, 18, 19, 20}); + checker.final_checks(); +} + +// Throws for nn in [2,5] or [8,10] with message. +static int get_number_with_asserts(int nn) { + TKET_ASSERT_WITH_THROW((nn - 2) * (nn - 5) > 0); + + TKET_ASSERT_WITH_THROW( + (nn - 8) * (nn - 10) > 0 || AssertMessage() << "N=" << nn << ": second"); + + return nn + 5; +} + +SCENARIO("Asserts with throws within calls") { + std::vector calc_messages; + std::vector values_of_nn_with_error; + for (int nn = 0; nn <= 30; ++nn) { + try { + // Throws for [2,5] or [8,10]. + const int mm = get_number_with_asserts(nn); + + // Throws for mm=15,16, so nn=10,11, + // but NOT for 10 because of the above! So only for nn=11. + TKET_ASSERT_WITH_THROW(!(mm >= 15 && mm <= 16)); + + // Throws for [26,30], since mm=n+5. + TKET_ASSERT_WITH_THROW( + mm <= 30 || AssertMessage() << "N=" << nn << ", M=" << mm); + + // Should throw from nn-10, so [12,15] or [18,20] (with message). + TKET_ASSERT_WITH_THROW(get_number_with_asserts(nn - 10) >= nn - 5); + + // Should throw from nn-15, so [17,20] + // (except that [18,20] are covered above, so nn=17 only) + // or [23,25]. 
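+      // (In each of these remaining cases the inner call throws from one of
+      // its own asserts, so the macro reports it as an unexpected exception
+      // rather than using the AssertMessage text on this line.)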
+ TKET_ASSERT_WITH_THROW( + get_number_with_asserts(nn - 15) >= nn - 10 || + AssertMessage() << "assert with N=" << nn); + } catch (const std::exception& e) { + values_of_nn_with_error.push_back(nn); + std::stringstream ss; + ss << "CHECK: nn=" << nn << " ; " << e.what(); + calc_messages.emplace_back(ss.str()); + } + } + CHECK(calc_messages.size() == 24); + check_filename_is_included(calc_messages); + + MessageChecker checker(calc_messages); + + for (int ii = 0; ii <= 3; ++ii) { + const auto& message = checker.get_message(ii); + CHECK_THAT( + message, + Contains(std::string("CHECK: nn=") + std::to_string(ii + 2) + " ; ")); + // comes from "get_number_with_asserts" + CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); + // the function name + CHECK_THAT(message, Contains("get_number_with_asserts")); + } + for (int ii = 4; ii <= 6; ++ii) { + const auto& message = checker.get_message(ii); + const auto n_value = std::to_string(ii + 4); + CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); + // comes from "get_number_with_asserts" + CHECK_THAT(message, Contains("Assertion")); + // the function name + CHECK_THAT(message, Contains("get_number_with_asserts")); + CHECK_THAT(message, Contains(std::string("'N=") + n_value + ": second'")); + + // comes from the second assert in the function, without a message. + CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); + } + { + const auto& message = checker.get_message(7); + CHECK_THAT(message, Contains("CHECK: nn=11 ; ")); + CHECK_THAT(message, Contains("Assertion '!(mm >= 15 && mm <= 16)'")); + + CHECK_THAT(message, !Contains("get_number_with_asserts")); + } + for (int ii = 8; ii <= 11; ++ii) { + const auto& message = checker.get_message(ii); + const auto n_value = std::to_string(ii + 4); + CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); + CHECK_THAT( + message, Contains("Evaluating assertion condition " + "'get_number_with_asserts(nn - 10) >= nn - 5'")); + CHECK_THAT(message, Contains("threw unexpected exception")); + CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); + + CHECK_THAT(message, !Contains("AssertMessage()")); + } + { + const auto& message = checker.get_message(12); + CHECK_THAT(message, Contains(std::string("CHECK: nn=17 ; "))); + CHECK_THAT( + message, + Contains( + "Evaluating assertion condition " + "'get_number_with_asserts(nn - 15) >= nn - 10 || AssertMessage() << ")); + CHECK_THAT(message, Contains("threw unexpected exception")); + CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); + } + for (int ii = 13; ii <= 15; ++ii) { + const auto& message = checker.get_message(ii); + CHECK_THAT( + message, + Contains(std::string("CHECK: nn=") + std::to_string(ii + 5) + " ; ")); + CHECK_THAT( + message, Contains("Evaluating assertion condition " + "'get_number_with_asserts(nn - 10) >= nn - 5'")); + CHECK_THAT(message, Contains("threw unexpected exception")); + CHECK_THAT(message, Contains("Assertion")); + CHECK_THAT( + message, + Contains(std::string("'N=") + std::to_string(ii - 5) + ": second")); + + CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); + CHECK_THAT(message, !Contains("AssertMessage()")); + } + for (int ii = 16; ii <= 18; ++ii) { + const auto& message = checker.get_message(ii); + CHECK_THAT( + message, + Contains(std::string("CHECK: nn=") + std::to_string(ii + 7) + " ; ")); + CHECK_THAT( + message, + Contains( + "Evaluating assertion condition " + "'get_number_with_asserts(nn - 15) >= nn - 10 || AssertMessage()")); + 
CHECK_THAT(message, Contains("threw unexpected exception")); + CHECK_THAT(message, Contains("Assertion")); + CHECK_THAT( + message, + Contains(std::string("'N=") + std::to_string(ii - 8) + ": second")); + + CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); + } + for (int ii = 19; ii <= 23; ++ii) { + const auto& message = checker.get_message(ii); + const auto n_value = std::to_string(ii + 7); + CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); + CHECK_THAT(message, Contains("Assertion ")); + CHECK_THAT(message, Contains("failed: ")); + CHECK_THAT( + message, Contains("'N=" + n_value + ", M=" + std::to_string(ii + 12))); + + CHECK_THAT(message, !Contains("Evaluating assertion condition")); + CHECK_THAT(message, !Contains("get_number_with_asserts")); + CHECK_THAT(message, !Contains("threw unexpected exception")); + CHECK_THAT(message, !Contains("Assertion()")); + CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); + } + CHECK(values_of_nn_with_error == std::vector{2, 3, 4, 5, 8, 9, + 10, 11, 12, 13, 14, 15, + 17, 18, 19, 20, 23, 24, + 25, 26, 27, 28, 29, 30}); + checker.final_checks(); +} + +SCENARIO("Asserts with various bool conversions") { + // First, list things which do throw. + bool throws = true; + try { + TKET_ASSERT_WITH_THROW(!""); + throws = false; + } catch (const std::exception&) { + } + CHECK(throws); + + throws = true; + try { + TKET_ASSERT_WITH_THROW(0); + throws = false; + } catch (const std::exception&) { + } + CHECK(throws); + + int xx = 1; + try { + // Now, list non-throwing things first. + TKET_ASSERT_WITH_THROW(""); + ++xx; + TKET_ASSERT_WITH_THROW("aaaaa"); + ++xx; + TKET_ASSERT_WITH_THROW(xx); + ++xx; + TKET_ASSERT_WITH_THROW(true); + ++xx; + TKET_ASSERT_WITH_THROW(-1); + ++xx; + TKET_ASSERT_WITH_THROW(xx > 0); + ++xx; + // Throws + TKET_ASSERT_WITH_THROW(!"bbbbb"); + xx *= 1000; + } catch (const std::exception&) { + xx *= 100; + } + CHECK(xx == 700); +} + +} // namespace tket diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 40248345ff..b496083faa 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -25,6 +25,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/CircuitsForTesting.cpp ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp ${TKET_TESTS_DIR}/Utils/test_CosSinDecomposition.cpp + ${TKET_TESTS_DIR}/Utils/test_TketAssertWithThrow.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp ${TKET_TESTS_DIR}/Graphs/GraphTestingRoutines.cpp @@ -56,6 +57,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ProblemGeneration.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/TestStatsStructs.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TSAUtils/test_SwapFunctions.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_BestTsaFixedSwapSequences.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_DistancesFromArchitecture.cpp From 2d3a1ab512aa1b53dc086cf04dc75f7f8cfba64b Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Thu, 3 Feb 2022 20:22:17 +0000 Subject: [PATCH 040/146] clang format --- tket/tests/Utils/test_TketAssertWithThrow.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tket/tests/Utils/test_TketAssertWithThrow.cpp b/tket/tests/Utils/test_TketAssertWithThrow.cpp index a45276d269..6598f83a13 100644 --- a/tket/tests/Utils/test_TketAssertWithThrow.cpp +++ 
b/tket/tests/Utils/test_TketAssertWithThrow.cpp @@ -218,10 +218,9 @@ SCENARIO("Asserts with throws within calls") { const auto& message = checker.get_message(12); CHECK_THAT(message, Contains(std::string("CHECK: nn=17 ; "))); CHECK_THAT( - message, - Contains( - "Evaluating assertion condition " - "'get_number_with_asserts(nn - 15) >= nn - 10 || AssertMessage() << ")); + message, Contains("Evaluating assertion condition " + "'get_number_with_asserts(nn - 15) >= nn - 10 || " + "AssertMessage() << ")); CHECK_THAT(message, Contains("threw unexpected exception")); CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); } From 740cfc6bb8c77d40ab5015d3f11c3c0357250595 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 4 Feb 2022 14:50:52 +0000 Subject: [PATCH 041/146] try to fix code coverage branching problems in TKET_ASSERT_WITH_THROW by hiding throws --- tket/src/Utils/AssertWithThrowHelper.cpp | 44 ++++++++++++ tket/src/Utils/CMakeLists.txt | 1 + tket/src/Utils/include/Utils/Assert.hpp | 67 +++++++++---------- .../include/Utils/AssertWithThrowHelper.hpp | 58 ++++++++++++++++ 4 files changed, 133 insertions(+), 37 deletions(-) create mode 100644 tket/src/Utils/AssertWithThrowHelper.cpp create mode 100644 tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp diff --git a/tket/src/Utils/AssertWithThrowHelper.cpp b/tket/src/Utils/AssertWithThrowHelper.cpp new file mode 100644 index 0000000000..5f2d379d18 --- /dev/null +++ b/tket/src/Utils/AssertWithThrowHelper.cpp @@ -0,0 +1,44 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "AssertWithThrowHelper.hpp" + +// GCOVR_EXCL_START +AssertWithThrowHelper::AssertWithThrowHelper() + : m_get_error_stream_called(false) {} + +std::stringstream& AssertWithThrowHelper::get_error_stream() { + auto& object = get(); + object.m_get_error_stream_called = true; + return object.m_ss; +} + +void AssertWithThrowHelper::throw_upon_error() { + auto& object = get(); + if (!object.m_get_error_stream_called) { + return; + } + const auto message = object.m_ss.str(); + // Clear the stream, ready for the next error + // (since the caller might catch the expection and then throw others). 
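+  // Both the flag and the buffered text are reset, so the next failing
+  // assert starts from a clean state.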
+ object.m_get_error_stream_called = false; + object.m_ss.str(std::string()); + throw std::runtime_error(message); +} + +AssertWithThrowHelper& AssertWithThrowHelper::get() { + static AssertWithThrowHelper object; + return object; +} +// GCOVR_EXCL_STOP diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index e937647ec5..4ec3cafa0f 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -20,6 +20,7 @@ endif() add_library(tket-${COMP} AssertMessage.cpp + AssertWithThrowHelper.cpp TketLog.cpp UnitID.cpp HelperFunctions.cpp diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index 94f293fd4e..a4871f82c4 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -18,6 +18,7 @@ #include #include "AssertMessage.hpp" +#include "AssertWithThrowHelper.hpp" #include "TketLog.hpp" /** @@ -83,41 +84,33 @@ /** Like TKET_ASSERT, but throws an exception instead of aborting * if the condition is not satisfied. */ -#define TKET_ASSERT_WITH_THROW(b) \ - /* GCOVR_EXCL_START */ \ - do { \ - bool intended_exception_with_message = false; \ - try { \ - if (!(b)) { \ - std::stringstream msg; \ - msg << "Assertion '" << #b << "' (" << __FILE__ << " : " << __func__ \ - << " : " << __LINE__ << ") failed."; \ - intended_exception_with_message = true; \ - throw std::runtime_error(msg.str()); \ - } \ - } catch (const AssertMessage::MessageData& e1) { \ - std::stringstream msg; \ - msg << "Assertion "; \ - if (e1.verbose) { \ - msg << "'" << #b << "' "; \ - } \ - msg << "(" << __FILE__ << " : " << __func__ << " : " << __LINE__ \ - << ") failed: '" << e1.what() << "'"; \ - throw std::runtime_error(msg.str()); \ - } catch (const std::exception& e2) { \ - if (intended_exception_with_message) { \ - throw std::runtime_error(e2.what()); \ - } \ - std::stringstream msg; \ - msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ - << " : " << __func__ << " : " << __LINE__ \ - << ") threw unexpected exception: '" << e2.what() << "'"; \ - throw std::runtime_error(msg.str()); \ - } catch (...) { \ - std::stringstream msg; \ - msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ - << " : " << __func__ << " : " << __LINE__ \ - << ") threw unknown exception."; \ - throw std::runtime_error(msg.str()); \ - } \ +#define TKET_ASSERT_WITH_THROW(b) \ + /* GCOVR_EXCL_START */ \ + do { \ + try { \ + if (!(b)) { \ + auto& ss = AssertWithThrowHelper::get_error_stream(); \ + ss << "Assertion '" << #b << "' (" << __FILE__ << " : " << __func__ \ + << " : " << __LINE__ << ") failed."; \ + } \ + } catch (const AssertMessage::MessageData& e1) { \ + auto& ss = AssertWithThrowHelper::get_error_stream(); \ + ss << "Assertion "; \ + if (e1.verbose) { \ + ss << "'" << #b << "' "; \ + } \ + ss << "(" << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") failed: '" << e1.what() << "'"; \ + } catch (const std::exception& e2) { \ + auto& ss = AssertWithThrowHelper::get_error_stream(); \ + ss << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ + << " : " << __func__ << " : " << __LINE__ \ + << ") threw unexpected exception: '" << e2.what() << "'"; \ + } catch (...) 
{ \ + auto& ss = AssertWithThrowHelper::get_error_stream(); \ + ss << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ + << " : " << __func__ << " : " << __LINE__ \ + << ") threw unknown exception."; \ + } \ + AssertWithThrowHelper::throw_upon_error(); \ } while (0) /* GCOVR_EXCL_STOP */ diff --git a/tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp b/tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp new file mode 100644 index 0000000000..5e0c569228 --- /dev/null +++ b/tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp @@ -0,0 +1,58 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +// GCOVR_EXCL_START +/** To be used only by the TKET_ASSERT_WITH_THROW macro. + * Something like this is necessary to prevent exceptions + * generating many extra branches in test coverage, see + * + * https://stackoverflow.com/questions/42003783/ + * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 + * + * We want to hide the throws (or at least, have one single throw), + * and also provide a stringstream to avoid having to construct one + * every time the assert is checked (asserts must have + * almost zero performance impact if they are not triggered). + */ +class AssertWithThrowHelper { + public: + /** Get a stored stream, to write errors to. + * The caller should only call this if they are certain + * that an error has occurred. + * The caller can write to this multiple times + * before calling throw_upon_error(). + */ + static std::stringstream& get_error_stream(); + + /** If get_error_stream() was previously called, + * throw an exception with the contents of the stream + * (even if an empty string), + * and clear the stream ready for the next use. + * Otherwise does nothing. 
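+   * Keeping the single throw inside this helper, rather than writing it
+   * out in the macro itself, is what hides it from the coverage tool.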
+ */ + static void throw_upon_error(); + + private: + bool m_get_error_stream_called; + std::stringstream m_ss; + + AssertWithThrowHelper(); + + static AssertWithThrowHelper& get(); +}; +// GCOVR_EXCL_STOP From 04efd72b7f8674c9f486e9a0b095973cfd09a848 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 4 Feb 2022 14:57:10 +0000 Subject: [PATCH 042/146] correct "does not return a value" error --- tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp index e6c537bda6..c4d70c0bc3 100644 --- a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp +++ b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp @@ -70,8 +70,10 @@ Eigen::MatrixXcd GateUnitaryMatrixVariableQubits::get_dense_unitary( return GateUnitaryMatrixImplementations::NPhasedX( number_of_qubits, parameters[0], parameters[1]); default: - TKET_ASSERT_WITH_THROW(false); + break; } + TKET_ASSERT_WITH_THROW(false); + return Eigen::MatrixXcd(); } } // namespace internal From 1b667a5d35267201ca9c668162ab74f49eabe8aa Mon Sep 17 00:00:00 2001 From: yao-cqc <75305462+yao-cqc@users.noreply.github.com> Date: Fri, 4 Feb 2022 14:57:45 +0000 Subject: [PATCH 043/146] Infra/use bimap for quantum boundary (#185) * Add sequenced_bimap_t * Use sequenced_bimap_t for unit_vertport_frontier_t * Update MultiGateReorder --- tket/src/Mapping/MappingFrontier.cpp | 8 +-- tket/src/Mapping/MultiGateReorder.cpp | 50 +++++++------------ .../include/Mapping/MappingFrontier.hpp | 2 +- .../include/Utils/SequencedContainers.hpp | 16 ++++++ 4 files changed, 36 insertions(+), 40 deletions(-) diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 9b2650da1e..b38aefd7ac 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -10,12 +10,8 @@ namespace tket { UnitID get_unitid_from_unit_frontier( const std::shared_ptr& u_frontier, const VertPort& vp) { - for (auto it = u_frontier->get().begin(); - it != u_frontier->get().end(); ++it) { - if (it->second == vp) { - return it->first; - } - } + auto it = u_frontier->get().find(vp); + if (it != u_frontier->get().end()) return it->first; throw MappingFrontierError( std::string("Edge provided not in unit_frontier_t object.")); } diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 1b0a2b036b..e127489d7a 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -14,24 +14,25 @@ MultiGateReorder::MultiGateReorder( _mapping_frontier->circuit_, _mapping_frontier->quantum_boundary)); } -// Traverse the DAG to the quantum frontier encoded in q_boundary_map +// Traverse the DAG to the quantum frontier // to find the UnitID associated with an VertPort UnitID get_unitid_from_vertex_port( - const Circuit &circ, const VertPort &vert_port, - const std::map &q_boundary_map) { + const std::shared_ptr &frontier, + const VertPort &vert_port) { VertPort current_vert_port = vert_port; while (true) { - auto it = q_boundary_map.find(current_vert_port); - if (it != q_boundary_map.end()) { - return it->second; + auto it = + frontier->quantum_boundary->get().find(current_vert_port); + if (it != frontier->quantum_boundary->get().end()) { + return it->first; } - Edge current_e = circ.get_nth_out_edge( + Edge current_e = frontier->circuit_.get_nth_out_edge( current_vert_port.first, current_vert_port.second); Vertex prev_vert; Edge 
prev_e; std::tie(prev_vert, prev_e) = - circ.get_prev_pair(current_vert_port.first, current_e); - current_vert_port = {prev_vert, circ.get_source_port(prev_e)}; + frontier->circuit_.get_prev_pair(current_vert_port.first, current_e); + current_vert_port = {prev_vert, frontier->circuit_.get_source_port(prev_e)}; } } @@ -46,12 +47,11 @@ bool is_multiq_quantum_gate(const Circuit &circ, const Vertex &vert) { } bool is_physically_permitted( - const Circuit &circ, const ArchitecturePtr &arc_ptr, const Vertex &vert, - const std::map &q_boundary_map) { + const std::shared_ptr &frontier, + const ArchitecturePtr &arc_ptr, const Vertex &vert) { std::vector nodes; - for (port_t port = 0; port < circ.n_ports(vert); ++port) { - nodes.push_back( - Node(get_unitid_from_vertex_port(circ, {vert, port}, q_boundary_map))); + for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { + nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); } return arc_ptr->valid_operation(nodes); @@ -176,14 +176,11 @@ void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { // store a copy of the original this->mapping_frontier_->quantum_boundray // this object will be updated and reset throughout the procedure - // so need to return it to original setting at end - // also create a map for getting UnitID from VertPort - std::map q_boundary_map; + // so need to return it to original setting at end. unit_vertport_frontier_t copy; for (const std::pair &pair : this->mapping_frontier_->quantum_boundary->get()) { copy.insert({pair.first, pair.second}); - q_boundary_map.insert({pair.second, pair.first}); } // Get a subcircuit only for iterating vertices Subcircuit circ = @@ -196,8 +193,7 @@ void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { // 2. 
is a multi qubit quantum operation without classical controls if (is_multiq_quantum_gate(this->mapping_frontier_->circuit_, vert) && is_physically_permitted( - this->mapping_frontier_->circuit_, this->architecture_, vert, - q_boundary_map)) { + this->mapping_frontier_, this->architecture_, vert)) { std::optional> commute_pairs = try_find_commute_edges( this->mapping_frontier_->circuit_, this->u_frontier_edges_, vert); @@ -212,12 +208,6 @@ void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( this->mapping_frontier_->circuit_, this->mapping_frontier_->quantum_boundary)); - // Update the map - q_boundary_map.clear(); - for (const std::pair &pair : - this->mapping_frontier_->quantum_boundary->get()) { - q_boundary_map.insert({pair.second, pair.first}); - } } } } @@ -235,11 +225,6 @@ bool MultiGateReorderRoutingMethod::check_method( const EdgeVec u_frontier_edges = convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( mapping_frontier->circuit_, mapping_frontier->quantum_boundary)); - std::map q_boundary_map; - for (const std::pair &pair : - mapping_frontier->quantum_boundary->get()) { - q_boundary_map.insert({pair.second, pair.first}); - } Subcircuit circ = mapping_frontier->get_frontier_subcircuit( this->max_depth_, this->max_size_); @@ -247,8 +232,7 @@ bool MultiGateReorderRoutingMethod::check_method( // we are certain that any multi-q vert lies after the frontier for (const Vertex &vert : circ.verts) { if (is_multiq_quantum_gate(mapping_frontier->circuit_, vert) && - is_physically_permitted( - mapping_frontier->circuit_, architecture, vert, q_boundary_map)) { + is_physically_permitted(mapping_frontier, architecture, vert)) { std::optional> commute_pairs = try_find_commute_edges( mapping_frontier->circuit_, u_frontier_edges, vert); diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp index afc4128e58..02c404869b 100644 --- a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -8,7 +8,7 @@ namespace tket { -typedef sequenced_map_t unit_vertport_frontier_t; +typedef sequenced_bimap_t unit_vertport_frontier_t; // list of error types to throw out class MappingFrontierError : public std::logic_error { diff --git a/tket/src/Utils/include/Utils/SequencedContainers.hpp b/tket/src/Utils/include/Utils/SequencedContainers.hpp index 843fb7453f..3a33fc174c 100644 --- a/tket/src/Utils/include/Utils/SequencedContainers.hpp +++ b/tket/src/Utils/include/Utils/SequencedContainers.hpp @@ -23,7 +23,23 @@ namespace tket { struct TagKey {}; +struct TagValue {}; struct TagSeq {}; + +template +using sequenced_bimap_t = boost::multi_index::multi_index_container< + std::pair, + boost::multi_index::indexed_by< + boost::multi_index::ordered_unique< + boost::multi_index::tag, + boost::multi_index::member< + std::pair, A, &std::pair::first>>, + boost::multi_index::ordered_unique< + boost::multi_index::tag, + boost::multi_index::member< + std::pair, B, &std::pair::second>>, + boost::multi_index::sequenced>>>; + template using sequenced_map_t = boost::multi_index::multi_index_container< std::pair, From 7fede77826a6ac831b78bcbf10f7afcf38a0be3d Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 4 Feb 2022 18:57:32 +0000 Subject: [PATCH 044/146] remove try/catch from tket assert with throw, to cut down branching --- tket/src/Utils/AssertMessage.cpp | 26 ++++-- tket/src/Utils/AssertWithThrowHelper.cpp | 44 
--------- tket/src/Utils/CMakeLists.txt | 1 - tket/src/Utils/include/Utils/Assert.hpp | 89 +++++++++---------- .../src/Utils/include/Utils/AssertMessage.hpp | 59 +++++++----- .../include/Utils/AssertWithThrowHelper.hpp | 58 ------------ tket/tests/Utils/test_TketAssertWithThrow.cpp | 77 ++++++++-------- 7 files changed, 140 insertions(+), 214 deletions(-) delete mode 100644 tket/src/Utils/AssertWithThrowHelper.cpp delete mode 100644 tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp diff --git a/tket/src/Utils/AssertMessage.cpp b/tket/src/Utils/AssertMessage.cpp index d4a497c554..3bd65b285c 100644 --- a/tket/src/Utils/AssertMessage.cpp +++ b/tket/src/Utils/AssertMessage.cpp @@ -14,22 +14,32 @@ #include "AssertMessage.hpp" +#include + namespace tket { // GCOVR_EXCL_START -AssertMessage::AssertMessage() : m_verbose(false) {} +AssertMessage::AssertMessage() {} + +std::string AssertMessage::get_error_message() { + const auto message = get_error_stream().str(); -AssertMessage AssertMessage::verbose() { - AssertMessage message; - message.m_verbose = true; + // Clear the global stream, ready for the next message + // (in the assert with throw variants, we may try/catch + // multiple times). + get_error_stream().str(std::string()); return message; } -AssertMessage::MessageData::MessageData(const std::string& str, bool vbose) - : std::runtime_error(str), verbose(vbose) {} +AssertMessage::operator bool() const { return false; } + +std::stringstream& AssertMessage::get_error_stream() { + static std::stringstream ss; + return ss; +} -AssertMessage::operator bool() const { - throw MessageData(m_ss.str(), m_verbose); +void AssertMessage::throw_message(const std::string& str) { + throw std::runtime_error(str); } // GCOVR_EXCL_STOP diff --git a/tket/src/Utils/AssertWithThrowHelper.cpp b/tket/src/Utils/AssertWithThrowHelper.cpp deleted file mode 100644 index 5f2d379d18..0000000000 --- a/tket/src/Utils/AssertWithThrowHelper.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "AssertWithThrowHelper.hpp" - -// GCOVR_EXCL_START -AssertWithThrowHelper::AssertWithThrowHelper() - : m_get_error_stream_called(false) {} - -std::stringstream& AssertWithThrowHelper::get_error_stream() { - auto& object = get(); - object.m_get_error_stream_called = true; - return object.m_ss; -} - -void AssertWithThrowHelper::throw_upon_error() { - auto& object = get(); - if (!object.m_get_error_stream_called) { - return; - } - const auto message = object.m_ss.str(); - // Clear the stream, ready for the next error - // (since the caller might catch the expection and then throw others). 
- object.m_get_error_stream_called = false; - object.m_ss.str(std::string()); - throw std::runtime_error(message); -} - -AssertWithThrowHelper& AssertWithThrowHelper::get() { - static AssertWithThrowHelper object; - return object; -} -// GCOVR_EXCL_STOP diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index 4ec3cafa0f..e937647ec5 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -20,7 +20,6 @@ endif() add_library(tket-${COMP} AssertMessage.cpp - AssertWithThrowHelper.cpp TketLog.cpp UnitID.cpp HelperFunctions.cpp diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index a4871f82c4..5715f134a7 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -18,7 +18,6 @@ #include #include "AssertMessage.hpp" -#include "AssertWithThrowHelper.hpp" #include "TketLog.hpp" /** @@ -31,10 +30,6 @@ * TKET_ASSERT(xcritical(msg.str()); \ std::abort(); \ } \ - } catch (const AssertMessage::MessageData& e1) { \ - std::stringstream msg; \ - msg << "Assertion "; \ - if (e1.verbose) { \ - msg << "'" << #b << "' "; \ - } \ - msg << "(" << __FILE__ << " : " << __func__ << " : " << __LINE__ \ - << ") failed: '" << e1.what() << "': aborting."; \ - tket::tket_log()->critical(msg.str()); \ - std::abort(); \ } catch (const std::exception& e2) { \ std::stringstream msg; \ msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ @@ -83,34 +78,38 @@ /** Like TKET_ASSERT, but throws an exception instead of aborting * if the condition is not satisfied. + * + * Note: this may seem convoluted. That's because the code coverage + * test programme annoyingly adds lots of branches if exceptions are thrown + * explicitly, despite the STOP/START tags telling it to ignore the code. + * See + * + * https://stackoverflow.com/questions/42003783/ + * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 + * + * We tried "hiding" the exceptions from this macro by putting the throws + * inside another function defined elsewhere. That did make some + * difference, but try/catch blocks also seemed to cause extra + * branching problems. + * Thus, we remove all explicit exceptions AND try/catch blocks, + * in the hope that it will cut down on the undesired extra branches. + * Thus, unlike TKET_ASSERT, an exception thrown by the EVALUATION of b + * will not be caught. But this should be very rare, + * AND we're explicitly trying to throw an exception INSTEAD of aborting, + * so this seems not too bad. */ -#define TKET_ASSERT_WITH_THROW(b) \ - /* GCOVR_EXCL_START */ \ - do { \ - try { \ - if (!(b)) { \ - auto& ss = AssertWithThrowHelper::get_error_stream(); \ - ss << "Assertion '" << #b << "' (" << __FILE__ << " : " << __func__ \ - << " : " << __LINE__ << ") failed."; \ - } \ - } catch (const AssertMessage::MessageData& e1) { \ - auto& ss = AssertWithThrowHelper::get_error_stream(); \ - ss << "Assertion "; \ - if (e1.verbose) { \ - ss << "'" << #b << "' "; \ - } \ - ss << "(" << __FILE__ << " : " << __func__ << " : " << __LINE__ \ - << ") failed: '" << e1.what() << "'"; \ - } catch (const std::exception& e2) { \ - auto& ss = AssertWithThrowHelper::get_error_stream(); \ - ss << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ - << " : " << __func__ << " : " << __LINE__ \ - << ") threw unexpected exception: '" << e2.what() << "'"; \ - } catch (...) 
{ \ - auto& ss = AssertWithThrowHelper::get_error_stream(); \ - ss << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ - << " : " << __func__ << " : " << __LINE__ \ - << ") threw unknown exception."; \ - } \ - AssertWithThrowHelper::throw_upon_error(); \ +#define TKET_ASSERT_WITH_THROW(b) \ + /* GCOVR_EXCL_START */ \ + do { \ + if (!(b)) { \ + std::stringstream msg; \ + msg << "Assertion '" << #b << "' (" << __FILE__ << " : " << __func__ \ + << " : " << __LINE__ << ") failed"; \ + const auto extra_message = tket::AssertMessage::get_error_message(); \ + if (!extra_message.empty()) { \ + msg << ": '" << extra_message << "'"; \ + } \ + msg << "."; \ + tket::AssertMessage::throw_message(msg.str()); \ + } \ } while (0) /* GCOVR_EXCL_STOP */ diff --git a/tket/src/Utils/include/Utils/AssertMessage.hpp b/tket/src/Utils/include/Utils/AssertMessage.hpp index 8cf4a764c6..39b6fdc25b 100644 --- a/tket/src/Utils/include/Utils/AssertMessage.hpp +++ b/tket/src/Utils/include/Utils/AssertMessage.hpp @@ -15,44 +15,61 @@ #pragma once #include -#include namespace tket { -/** This is for use with TKET_ASSERT, when we want to give a more detailed +// GCOVR_EXCL_START +/** This is only for use with TKET_ASSERT, when we want to give a more detailed * error message than just the assertion code and location. + * Also, some code might seem rather strange, but that's because exceptions + * can generate many extra branches in test coverage, see + * + * https://stackoverflow.com/questions/42003783/ + * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 + * + * We want to hide the throws (or at least, have one single throw), + * and also provide a stringstream to avoid having to construct one + * every time the assert is checked (asserts must have + * almost zero performance impact if they are not triggered). */ class AssertMessage { public: - /** Construct the object (the default non-verbose version) to begin writing to - * the stream. */ + /** Construct the object, to begin writing to the stream. */ AssertMessage(); - /** Get a verbose object (not the default). */ - static AssertMessage verbose(); - - /** Thrown when the message construction is finished, to store the necessary - * data. */ - struct MessageData : public std::runtime_error { - bool verbose; - MessageData(const std::string& str, bool verbose); - }; - - /** Throws a MessageData object when called, with the message. */ + /** Always returns false, so that "|| AssertMessage() << a)" + * becomes "|| false)". + */ operator bool() const; - // GCOVR_EXCL_START - /** Every streamable object can be written to the stream. */ + /** Every streamable object x can be written to the stream. */ template AssertMessage& operator<<(const T& x) { - m_ss << x; + get_error_stream() << x; return *this; } - // GCOVR_EXCL_STOP + + /** Get the stored error message. Of course, if AssertMessage() + * has not actually been called, just returns an empty string. + * Also, clears the stored message, ready for the next time. + */ + static std::string get_error_message(); + + /** Simply throws a std::runtime_error with the given message; + * hopefully this will fool the test coverage programme. + */ + static void throw_message(const std::string& str); private: - bool m_verbose; - std::stringstream m_ss; + /** Previously the error message for later use by TKET_ASSERT macros + * was passed on by exceptions within operator bool(), but that + * generated lots of code coverage branching problems. + * So now we use a global variable. 
The AssertMessage object + * will go out of scope, so there seems to be no other good way + * to pass the information on. + */ + static std::stringstream& get_error_stream(); }; +// GCOVR_EXCL_STOP } // namespace tket diff --git a/tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp b/tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp deleted file mode 100644 index 5e0c569228..0000000000 --- a/tket/src/Utils/include/Utils/AssertWithThrowHelper.hpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include - -// GCOVR_EXCL_START -/** To be used only by the TKET_ASSERT_WITH_THROW macro. - * Something like this is necessary to prevent exceptions - * generating many extra branches in test coverage, see - * - * https://stackoverflow.com/questions/42003783/ - * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 - * - * We want to hide the throws (or at least, have one single throw), - * and also provide a stringstream to avoid having to construct one - * every time the assert is checked (asserts must have - * almost zero performance impact if they are not triggered). - */ -class AssertWithThrowHelper { - public: - /** Get a stored stream, to write errors to. - * The caller should only call this if they are certain - * that an error has occurred. - * The caller can write to this multiple times - * before calling throw_upon_error(). - */ - static std::stringstream& get_error_stream(); - - /** If get_error_stream() was previously called, - * throw an exception with the contents of the stream - * (even if an empty string), - * and clear the stream ready for the next use. - * Otherwise does nothing. 
- */ - static void throw_upon_error(); - - private: - bool m_get_error_stream_called; - std::stringstream m_ss; - - AssertWithThrowHelper(); - - static AssertWithThrowHelper& get(); -}; -// GCOVR_EXCL_STOP diff --git a/tket/tests/Utils/test_TketAssertWithThrow.cpp b/tket/tests/Utils/test_TketAssertWithThrow.cpp index 6598f83a13..1c9c2f5c6d 100644 --- a/tket/tests/Utils/test_TketAssertWithThrow.cpp +++ b/tket/tests/Utils/test_TketAssertWithThrow.cpp @@ -53,11 +53,16 @@ class MessageChecker { } // namespace -static void check_filename_is_included( +static std::vector get_message_indices_with_filename( const std::vector& messages) { - for (const auto& message : messages) { - CHECK_THAT(message, Contains("test_TketAssertWithThrow.cpp")); + std::vector indices; + for (unsigned ii = 0; ii < messages.size(); ++ii) { + if (messages[ii].find("test_TketAssertWithThrow.cpp") != + std::string::npos) { + indices.push_back(ii); + } } + return indices; } static int get_number(int nn) { @@ -91,7 +96,9 @@ SCENARIO("Simple asserts with throws") { } CHECK(calc_messages.size() == 11); - check_filename_is_included(calc_messages); + CHECK( + get_message_indices_with_filename(calc_messages) == + std::vector{0, 1, 2, 3, 4, 5}); MessageChecker checker(calc_messages); @@ -106,18 +113,20 @@ SCENARIO("Simple asserts with throws") { const auto& message = checker.get_message(ii); const std::string n_value = std::to_string(ii + 5); CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); - CHECK_THAT(message, Contains("Assertion")); + CHECK_THAT( + message, + Contains("Assertion '(nn - 8) * (nn - 10) > 0 || AssertMessage() <<")); CHECK_THAT(message, Contains("failed:")); CHECK_THAT(message, Contains(std::string("'N=") + n_value + "'")); } for (int ii = 6; ii <= 10; ++ii) { const auto& message = checker.get_message(ii); const std::string n_value = std::to_string(ii + 10); - CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); CHECK_THAT( - message, - Contains("Evaluating assertion condition 'get_number(nn) < 20'")); - CHECK_THAT(message, Contains("threw unexpected exception: 'Error!!'")); + message, Contains(std::string("CHECK: nn=") + n_value + " ; Error!!")); + // The function "get_number" threw, but this was NOT picked up + // by the macro. Never mind. + CHECK_THAT(message, !Contains("ssertion")); } CHECK( values_of_nn_with_error == @@ -168,7 +177,13 @@ SCENARIO("Asserts with throws within calls") { } } CHECK(calc_messages.size() == 24); - check_filename_is_included(calc_messages); + + // Every error is thrown by TKET_ASSERT_WITH_THROW, + // so should have the filename. + CHECK( + get_message_indices_with_filename(calc_messages) == + std::vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}); MessageChecker checker(calc_messages); @@ -179,7 +194,7 @@ SCENARIO("Asserts with throws within calls") { Contains(std::string("CHECK: nn=") + std::to_string(ii + 2) + " ; ")); // comes from "get_number_with_asserts" CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); - // the function name + // The function name: the assert macro was inside the function. 
CHECK_THAT(message, Contains("get_number_with_asserts")); } for (int ii = 4; ii <= 6; ++ii) { @@ -206,53 +221,38 @@ SCENARIO("Asserts with throws within calls") { const auto& message = checker.get_message(ii); const auto n_value = std::to_string(ii + 4); CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); - CHECK_THAT( - message, Contains("Evaluating assertion condition " - "'get_number_with_asserts(nn - 10) >= nn - 5'")); - CHECK_THAT(message, Contains("threw unexpected exception")); + CHECK_THAT(message, Contains("get_number_with_asserts")); CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); - + // The throw within "get_number_with_asserts" was not picked up + // by the macro, so the macro code is not present in the error message. CHECK_THAT(message, !Contains("AssertMessage()")); } { const auto& message = checker.get_message(12); - CHECK_THAT(message, Contains(std::string("CHECK: nn=17 ; "))); CHECK_THAT( - message, Contains("Evaluating assertion condition " - "'get_number_with_asserts(nn - 15) >= nn - 10 || " - "AssertMessage() << ")); - CHECK_THAT(message, Contains("threw unexpected exception")); - CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); + message, + Contains("CHECK: nn=17 ; Assertion '(nn - 2) * (nn - 5) > 0'")); } for (int ii = 13; ii <= 15; ++ii) { const auto& message = checker.get_message(ii); CHECK_THAT( message, - Contains(std::string("CHECK: nn=") + std::to_string(ii + 5) + " ; ")); - CHECK_THAT( - message, Contains("Evaluating assertion condition " - "'get_number_with_asserts(nn - 10) >= nn - 5'")); - CHECK_THAT(message, Contains("threw unexpected exception")); - CHECK_THAT(message, Contains("Assertion")); + Contains( + std::string("CHECK: nn=") + std::to_string(ii + 5) + + " ; Assertion '(nn - 8) * (nn - 10) > 0 || AssertMessage() <<")); CHECK_THAT( message, Contains(std::string("'N=") + std::to_string(ii - 5) + ": second")); CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); - CHECK_THAT(message, !Contains("AssertMessage()")); } for (int ii = 16; ii <= 18; ++ii) { const auto& message = checker.get_message(ii); - CHECK_THAT( - message, - Contains(std::string("CHECK: nn=") + std::to_string(ii + 7) + " ; ")); CHECK_THAT( message, Contains( - "Evaluating assertion condition " - "'get_number_with_asserts(nn - 15) >= nn - 10 || AssertMessage()")); - CHECK_THAT(message, Contains("threw unexpected exception")); - CHECK_THAT(message, Contains("Assertion")); + std::string("CHECK: nn=") + std::to_string(ii + 7) + + " ; Assertion '(nn - 8) * (nn - 10) > 0 || AssertMessage() <<")); CHECK_THAT( message, Contains(std::string("'N=") + std::to_string(ii - 8) + ": second")); @@ -262,7 +262,10 @@ SCENARIO("Asserts with throws within calls") { for (int ii = 19; ii <= 23; ++ii) { const auto& message = checker.get_message(ii); const auto n_value = std::to_string(ii + 7); - CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); + CHECK_THAT( + message, Contains( + std::string("CHECK: nn=") + n_value + + " ; Assertion 'mm <= 30 || AssertMessage() << ")); CHECK_THAT(message, Contains("Assertion ")); CHECK_THAT(message, Contains("failed: ")); CHECK_THAT( From 5991a8cdd3c94f1462e61f202e796fedf4c04274 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 7 Feb 2022 14:42:03 +0000 Subject: [PATCH 045/146] remove TKET_ASSERT_WITH_THROW, replace with TKET_ASSERT --- tket/src/Gate/GateUnitaryMatrix.cpp | 2 +- .../Gate/GateUnitaryMatrixVariableQubits.cpp | 12 +- tket/src/Gate/GateUnitarySparseMatrix.cpp | 2 +- 
tket/src/Graphs/AdjacencyData.cpp | 8 +- tket/src/Graphs/BruteForceColouring.cpp | 2 +- tket/src/Simulation/BitOperations.cpp | 2 +- tket/src/Simulation/GateNode.cpp | 10 +- .../PauliExpBoxUnitaryCalculator.cpp | 2 +- .../src/TokenSwapping/ArchitectureMapping.cpp | 10 +- .../TokenSwapping/CyclesCandidateManager.cpp | 8 +- .../src/TokenSwapping/CyclesGrowthManager.cpp | 6 +- tket/src/TokenSwapping/CyclesPartialTsa.cpp | 10 +- .../TokenSwapping/CyclicShiftCostEstimate.cpp | 6 +- .../DistancesFromArchitecture.cpp | 2 +- tket/src/TokenSwapping/HybridTsa00.cpp | 4 +- .../NeighboursFromArchitecture.cpp | 4 +- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 10 +- tket/src/TokenSwapping/SwapListOptimiser.cpp | 18 +- .../TSAUtils/GeneralFunctions.cpp | 4 +- .../TSAUtils/VertexMappingFunctions.cpp | 4 +- .../TableLookup/CanonicalRelabelling.cpp | 16 +- .../TableLookup/ExactMappingLookup.cpp | 18 +- .../TableLookup/FilteredSwapSequences.cpp | 14 +- .../TableLookup/PartialMappingLookup.cpp | 2 +- .../TableLookup/SwapConversion.cpp | 8 +- .../TableLookup/SwapListSegmentOptimiser.cpp | 12 +- .../TableLookup/SwapListTableOptimiser.cpp | 20 +- .../TableLookup/VertexMapResizing.cpp | 12 +- tket/src/TokenSwapping/TrivialTSA.cpp | 43 ++- .../VectorListHybridSkeleton.cpp | 58 +-- .../TokenSwapping/VectorListHybrid.hpp | 6 +- .../TokenSwapping/main_entry_functions.cpp | 2 +- tket/src/Utils/AssertMessage.cpp | 11 +- tket/src/Utils/include/Utils/Assert.hpp | 45 +-- .../src/Utils/include/Utils/AssertMessage.hpp | 18 +- tket/tests/Utils/test_TketAssertWithThrow.cpp | 329 ------------------ tket/tests/tkettestsfiles.cmake | 1 - 37 files changed, 181 insertions(+), 560 deletions(-) delete mode 100644 tket/tests/Utils/test_TketAssertWithThrow.cpp diff --git a/tket/src/Gate/GateUnitaryMatrix.cpp b/tket/src/Gate/GateUnitaryMatrix.cpp index 18f877ebb8..06b6808d2d 100644 --- a/tket/src/Gate/GateUnitaryMatrix.cpp +++ b/tket/src/Gate/GateUnitaryMatrix.cpp @@ -157,7 +157,7 @@ static Eigen::MatrixXcd get_unitary_for_ordinary_fixed_size_case( const Eigen::MatrixXcd matr = get_unitary_or_throw(op_type, number_of_qubits, parameters); - TKET_ASSERT_WITH_THROW(matr.cols() == matr.rows()); + TKET_ASSERT(matr.cols() == matr.rows()); const auto expected_number_of_qubits = get_number_of_qubits(matr.cols()); if (expected_number_of_qubits == number_of_qubits) { return matr; diff --git a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp index c4d70c0bc3..7743473aaf 100644 --- a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp +++ b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp @@ -50,29 +50,29 @@ unsigned GateUnitaryMatrixVariableQubits::get_number_of_parameters() const { Eigen::MatrixXcd GateUnitaryMatrixVariableQubits::get_dense_unitary( unsigned number_of_qubits, const std::vector& parameters) const { // This class is internal only, so an assert is OK. 
- TKET_ASSERT_WITH_THROW(known_type); - TKET_ASSERT_WITH_THROW(parameters.size() == number_of_parameters); + TKET_ASSERT(known_type); + TKET_ASSERT(parameters.size() == number_of_parameters); switch (parameters.size()) { case 0: - TKET_ASSERT_WITH_THROW(op_type == OpType::CnX); + TKET_ASSERT(op_type == OpType::CnX); return GateUnitaryMatrixImplementations::CnX(number_of_qubits); case 1: if (op_type == OpType::CnRy) { return GateUnitaryMatrixImplementations::CnRy( number_of_qubits, parameters[0]); } else { - TKET_ASSERT_WITH_THROW(op_type == OpType::PhaseGadget); + TKET_ASSERT(op_type == OpType::PhaseGadget); return GateUnitaryMatrixImplementations::PhaseGadget( number_of_qubits, parameters[0]); } case 2: - TKET_ASSERT_WITH_THROW(op_type == OpType::NPhasedX); + TKET_ASSERT(op_type == OpType::NPhasedX); return GateUnitaryMatrixImplementations::NPhasedX( number_of_qubits, parameters[0], parameters[1]); default: break; } - TKET_ASSERT_WITH_THROW(false); + TKET_ASSERT(false); return Eigen::MatrixXcd(); } diff --git a/tket/src/Gate/GateUnitarySparseMatrix.cpp b/tket/src/Gate/GateUnitarySparseMatrix.cpp index 1780c0c996..1fa085db83 100644 --- a/tket/src/Gate/GateUnitarySparseMatrix.cpp +++ b/tket/src/Gate/GateUnitarySparseMatrix.cpp @@ -107,7 +107,7 @@ const FixedTripletsWithNoParameters& FixedTripletsWithNoParameters::get( GateUnitaryMatrixUtils::check_and_throw_upon_wrong_number_of_parameters( gate.get_type(), gate.n_qubits(), GateUnitaryMatrixUtils::get_checked_parameters(gate), 0); - TKET_ASSERT_WITH_THROW(gate.n_qubits() == 3); + TKET_ASSERT(gate.n_qubits() == 3); return data; } } // namespace diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index 18ea0872be..7589513c6e 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -65,7 +65,7 @@ string AdjacencyData::to_string() const { const set& AdjacencyData::get_neighbours( std::size_t vertex) const { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( vertex < m_cleaned_data.size() || AssertMessage() << "AdjacencyData: get_neighbours called with invalid vertex " @@ -101,7 +101,7 @@ bool AdjacencyData::add_edge(std::size_t i, std::size_t j) { } bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( (i < m_cleaned_data.size() && j < m_cleaned_data.size()) || AssertMessage() << "edge_exists called with vertices " << i << ", " << j << ", but there are only " << m_cleaned_data.size() @@ -143,11 +143,11 @@ AdjacencyData::AdjacencyData( for (std::size_t i = 0; i < m_cleaned_data.size(); ++i) { for (std::size_t j : raw_data[i]) { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( i != j || allow_loops || AssertMessage() << "vertex " << i << " out of " << m_cleaned_data.size() << " has a loop."); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( j < m_cleaned_data.size() || AssertMessage() << "vertex " << i << " has illegal neighbour vertex " << j << ", the size is " << m_cleaned_data.size()); diff --git a/tket/src/Graphs/BruteForceColouring.cpp b/tket/src/Graphs/BruteForceColouring.cpp index e5a1891407..1f5995bbe7 100644 --- a/tket/src/Graphs/BruteForceColouring.cpp +++ b/tket/src/Graphs/BruteForceColouring.cpp @@ -91,7 +91,7 @@ struct BruteForceColouring::Impl { // just because CURRENTLY a vertex has only one colour, // that it will ALWAYS be that way! 
if (initial_clique.count(nodes[node_index].vertex) != 0) { - TKET_ASSERT_WITH_THROW(earlier_colours.size() == 1); + TKET_ASSERT(earlier_colours.size() == 1); forbidden_colours.insert(earlier_colours[0]); } } diff --git a/tket/src/Simulation/BitOperations.cpp b/tket/src/Simulation/BitOperations.cpp index 418a0b8a79..4ccb7fddf9 100644 --- a/tket/src/Simulation/BitOperations.cpp +++ b/tket/src/Simulation/BitOperations.cpp @@ -46,7 +46,7 @@ ExpansionData get_expansion_data( auto test_bit = next_bit; for (unsigned left_shift_arg = 0;; ++left_shift_arg) { if ((test_bit & forbidden_bits) == 0) { - TKET_ASSERT_WITH_THROW(test_bit != 0); + TKET_ASSERT(test_bit != 0); // A free space has been found. push_back(result, next_bit, left_shift_arg); forbidden_bits |= test_bit; diff --git a/tket/src/Simulation/GateNode.cpp b/tket/src/Simulation/GateNode.cpp index 1af51fb425..0e9cb2c455 100644 --- a/tket/src/Simulation/GateNode.cpp +++ b/tket/src/Simulation/GateNode.cpp @@ -168,9 +168,9 @@ struct LiftedBitsResult { void LiftedBitsResult::set( const std::vector& qubits, unsigned full_number_of_qubits) { - TKET_ASSERT_WITH_THROW(full_number_of_qubits >= qubits.size()); - TKET_ASSERT_WITH_THROW(full_number_of_qubits < 32); - TKET_ASSERT_WITH_THROW(!qubits.empty()); + TKET_ASSERT(full_number_of_qubits >= qubits.size()); + TKET_ASSERT(full_number_of_qubits < 32); + TKET_ASSERT(!qubits.empty()); translated_bits.assign(get_matrix_size(qubits.size()), 0); @@ -179,7 +179,7 @@ void LiftedBitsResult::set( SimUInt k_string_bit = 1; for (unsigned count = 0; count < qubits.size(); ++count) { - TKET_ASSERT_WITH_THROW(full_number_of_qubits >= qubits[count] + 1); + TKET_ASSERT(full_number_of_qubits >= qubits[count] + 1); // This will be a bit within the length n string. SimUInt long_string_bit = 1; @@ -225,7 +225,7 @@ static void set_lifted_triplets( const SimUInt free_bits_limit = get_matrix_size(full_number_of_qubits - qubits.size()); - TKET_ASSERT_WITH_THROW(free_bits_limit != 0 || !"Too many bits"); + TKET_ASSERT(free_bits_limit != 0 || !"Too many bits"); for (SimUInt free_bits = 0; free_bits < free_bits_limit; ++free_bits) { const SimUInt expanded_free_bits = diff --git a/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp b/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp index 36e9651cc9..fe58c9523f 100644 --- a/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp +++ b/tket/src/Simulation/PauliExpBoxUnitaryCalculator.cpp @@ -125,7 +125,7 @@ void PauliExpBoxUnitaryCalculator::clear() { void PauliExpBoxUnitaryCalculator::add_entries( unsigned sparse_matrix_index, Pauli pauli) { - TKET_ASSERT_WITH_THROW(sparse_matrix_index < sparse_matrix.size()); + TKET_ASSERT(sparse_matrix_index < sparse_matrix.size()); const auto& single_pauli = pauli_map.at(pauli); sparse_matrix.push_back( get_combined_entry(sparse_matrix[sparse_matrix_index], single_pauli[0])); diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 8dc811a9ea..4fa444a17c 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -34,7 +34,7 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) const auto& node = m_vertex_to_node_mapping[ii]; { const auto citer = m_node_to_vertex_mapping.find(node); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( citer == m_node_to_vertex_mapping.cend() || AssertMessage() << "Duplicate node " << node.repr() << " at vertices " << citer->second << ", " << ii); @@ -68,7 +68,7 @@ 
ArchitectureMapping::ArchitectureMapping( // Check that the nodes agree with the architecture object. const auto uids = arch.nodes(); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( uids.size() == m_vertex_to_node_mapping.size() || AssertMessage() << "passed in " << edges.size() << " edges, giving " << m_vertex_to_node_mapping.size() @@ -77,7 +77,7 @@ ArchitectureMapping::ArchitectureMapping( for (const UnitID& uid : uids) { const Node node(uid); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( m_node_to_vertex_mapping.count(node) != 0 || AssertMessage() << "passed in " << edges.size() << " edges, giving " @@ -93,7 +93,7 @@ size_t ArchitectureMapping::number_of_vertices() const { const Node& ArchitectureMapping::get_node(size_t vertex) const { const auto num_vertices = number_of_vertices(); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( vertex < num_vertices || AssertMessage() << "get_node: invalid vertex " << vertex << " (architecture only has " << num_vertices @@ -104,7 +104,7 @@ const Node& ArchitectureMapping::get_node(size_t vertex) const { size_t ArchitectureMapping::get_vertex(const Node& node) const { const auto citer = m_node_to_vertex_mapping.find(node); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( citer != m_node_to_vertex_mapping.cend() || AssertMessage() << "get_vertex: node " << node.repr() << " has no vertex number"); diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp index f4abb692c3..13a5698a64 100644 --- a/tket/src/TokenSwapping/CyclesCandidateManager.cpp +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -41,11 +41,11 @@ size_t CyclesCandidateManager::fill_initial_cycle_ids(const Cycles& cycles) { if (cycle_length == 0) { cycle_length = vertices.size(); - TKET_ASSERT_WITH_THROW(cycle_length >= 2); + TKET_ASSERT(cycle_length >= 2); } else { - TKET_ASSERT_WITH_THROW(cycle_length == vertices.size()); + TKET_ASSERT(cycle_length == vertices.size()); } - TKET_ASSERT_WITH_THROW(cycle.decrease > 0); + TKET_ASSERT(cycle.decrease > 0); // We want 50*(decrease)/(num swaps) >= min_candidate_power_percentage. // (We multiply by 50 because a swap can change L by 2, not 1). 
@@ -104,7 +104,7 @@ void CyclesCandidateManager::discard_lower_power_solutions( for (auto id : m_cycles_to_keep) { highest_decrease = std::max(highest_decrease, cycles.at(id).decrease); } - TKET_ASSERT_WITH_THROW(highest_decrease > 0); + TKET_ASSERT(highest_decrease > 0); for (size_t ii = 0; ii < m_cycles_to_keep.size();) { if (cycles.at(m_cycles_to_keep[ii]).decrease < highest_decrease) { diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp index efb794520e..6ed8db9695 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.cpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -39,7 +39,7 @@ CyclesGrowthManager::Options& CyclesGrowthManager::get_options() { const Cycles& CyclesGrowthManager::get_cycles( bool throw_if_cycles_are_not_candidates) const { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( !(throw_if_cycles_are_not_candidates && !m_cycles_are_candidates)); return m_cycles; } @@ -85,7 +85,7 @@ bool CyclesGrowthManager::reset( bool CyclesGrowthManager::attempt_to_close_cycles( const VertexMapping& vertex_mapping, DistancesInterface& distances) { - TKET_ASSERT_WITH_THROW(!m_cycles_are_candidates); + TKET_ASSERT(!m_cycles_are_candidates); for (auto id_opt = m_cycles.front_id(); id_opt;) { const auto id = id_opt.value(); id_opt = m_cycles.next(id); @@ -119,7 +119,7 @@ CyclesGrowthManager::GrowthResult CyclesGrowthManager::attempt_to_grow( NeighboursInterface& neighbours) { GrowthResult result; - TKET_ASSERT_WITH_THROW(!m_cycles.empty()); + TKET_ASSERT(!m_cycles.empty()); if (m_cycles.front().vertices.size() >= m_options.max_cycle_size) { m_cycles.clear(); diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.cpp b/tket/src/TokenSwapping/CyclesPartialTsa.cpp index ede47f3b44..046488f18d 100644 --- a/tket/src/TokenSwapping/CyclesPartialTsa.cpp +++ b/tket/src/TokenSwapping/CyclesPartialTsa.cpp @@ -49,20 +49,20 @@ void CyclesPartialTsa::append_partial_solution( single_iteration_partial_solution( swaps, vertex_mapping, distances, neighbours); const auto swap_size_after = swaps.size(); - TKET_ASSERT_WITH_THROW(swap_size_after >= swap_size_before); + TKET_ASSERT(swap_size_after >= swap_size_before); if (swap_size_before == swap_size_after) { break; } } const size_t final_swap_size = swaps.size(); - TKET_ASSERT_WITH_THROW(initial_swap_size <= final_swap_size); + TKET_ASSERT(initial_swap_size <= final_swap_size); if (initial_swap_size == final_swap_size || !path_finder.edge_registration_has_effect()) { return; } // At least one swap was added. 
const auto current_back_id_opt = swaps.back_id(); - TKET_ASSERT_WITH_THROW(current_back_id_opt); + TKET_ASSERT(current_back_id_opt); auto current_id = current_back_id_opt.value(); for (size_t remaining_swaps = final_swap_size - initial_swap_size;;) { const auto& swap = swaps.at(current_id); @@ -72,7 +72,7 @@ void CyclesPartialTsa::append_partial_solution( break; } const auto prev_id_opt = swaps.previous(current_id); - TKET_ASSERT_WITH_THROW(prev_id_opt); + TKET_ASSERT(prev_id_opt); current_id = prev_id_opt.value(); } } @@ -100,7 +100,7 @@ void CyclesPartialTsa::single_iteration_partial_solution( return; } } - TKET_ASSERT_WITH_THROW(!"growth_manager termination"); + TKET_ASSERT(!"growth_manager termination"); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp index 6dcc9bdd1b..f5fe4a0050 100644 --- a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp @@ -23,7 +23,7 @@ namespace tsa_internal { CyclicShiftCostEstimate::CyclicShiftCostEstimate( const std::vector& vertices, DistancesInterface& distances) { - TKET_ASSERT_WITH_THROW(vertices.size() >= 2); + TKET_ASSERT(vertices.size() >= 2); // We first work out the total distance v(0)->v(1)-> .. -> v(n) -> v(0). // If we snip out v(i)->v(i+1), the remaining path tells us how many swaps // we need. So, we must snip out the LARGEST distance(v(i), v(i+1)). @@ -37,7 +37,7 @@ CyclicShiftCostEstimate::CyclicShiftCostEstimate( size_t v_index_with_largest_distance = vertices.size() - 1; for (size_t ii = 0; ii + 1 < vertices.size(); ++ii) { const auto distance_i = distances(vertices[ii], vertices[ii + 1]); - TKET_ASSERT_WITH_THROW(distance_i > 0); + TKET_ASSERT(distance_i > 0); total_distance += distance_i; if (distance_i < largest_distance) { largest_distance = distance_i; @@ -58,7 +58,7 @@ CyclicShiftCostEstimate::CyclicShiftCostEstimate( // What we've currently stored is the sum of dist(x,y), // and clearly (sum)(-1) = -(Number of terms in the sum). estimated_concrete_swaps = 2 * total_distance; - TKET_ASSERT_WITH_THROW(estimated_concrete_swaps > vertices.size() - 1); + TKET_ASSERT(estimated_concrete_swaps > vertices.size() - 1); estimated_concrete_swaps -= vertices.size() - 1; } diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index 5195f0369b..7766f6c9d1 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -74,7 +74,7 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { // architectures, since get_distance now should throw if v1, v2 are in // different connected components. However, leave the check in, in case some // other bizarre error causes distance zero to be returned. 
- TKET_ASSERT_WITH_THROW( + TKET_ASSERT( distance_entry > 0 || AssertMessage() << "DistancesFromArchitecture: architecture has " << arch.n_nodes() << " vertices, " diff --git a/tket/src/TokenSwapping/HybridTsa00.cpp b/tket/src/TokenSwapping/HybridTsa00.cpp index 6ae5e9fb04..9f5df1fd19 100644 --- a/tket/src/TokenSwapping/HybridTsa00.cpp +++ b/tket/src/TokenSwapping/HybridTsa00.cpp @@ -49,11 +49,11 @@ void HybridTsa00::append_partial_solution( swaps, vertex_mapping, distances, neighbours, path_finder); if (swaps_before == swaps.size()) { - TKET_ASSERT_WITH_THROW(all_tokens_home(vertex_mapping)); + TKET_ASSERT(all_tokens_home(vertex_mapping)); return; } } - TKET_ASSERT_WITH_THROW(!"hybrid TSA termination"); + TKET_ASSERT(!"hybrid TSA termination"); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index 3fb8e4d8b7..8e4ec6f287 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -28,7 +28,7 @@ NeighboursFromArchitecture::NeighboursFromArchitecture( const std::vector& NeighboursFromArchitecture::operator()( size_t vertex) { const auto num_vertices = m_arch_mapping.number_of_vertices(); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( vertex < num_vertices || AssertMessage() << "get_neighbours: invalid vertex " << vertex << " (only have " << num_vertices << " vertices)"); @@ -50,7 +50,7 @@ const std::vector& NeighboursFromArchitecture::operator()( for (const Node& node : neighbour_nodes) { const auto neighbour_vertex = m_arch_mapping.get_vertex(node); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( neighbour_vertex != vertex || AssertMessage() << "get_neighbours: vertex " << vertex << " for node " diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index a2695ea227..5be5d43740 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -81,8 +81,8 @@ void RiverFlowPathFinder::Impl::reset() { void RiverFlowPathFinder::Impl::grow_path( size_t target_vertex, size_t required_path_size) { - TKET_ASSERT_WITH_THROW(path.size() < required_path_size); - TKET_ASSERT_WITH_THROW(!path.empty()); + TKET_ASSERT(path.size() < required_path_size); + TKET_ASSERT(!path.empty()); // We don't yet know how to move on, so we must choose a neighbour. // All candidates will have the same edge count. 
@@ -114,7 +114,7 @@ void RiverFlowPathFinder::Impl::grow_path( candidate_moves.back().count = edge_count; continue; } - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( neighbour_distance_to_target == remaining_distance || neighbour_distance_to_target == remaining_distance + 1 || AssertMessage() << "d(v_" << path.back() << ", v_" << target_vertex @@ -123,7 +123,7 @@ void RiverFlowPathFinder::Impl::grow_path( << ", at distance " << neighbour_distance_to_target << " to the target v_" << target_vertex); } - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( !candidate_moves.empty() || AssertMessage() << "No neighbours of v_" << path.back() << " at correct distance " << remaining_distance - 1 @@ -170,7 +170,7 @@ const vector& RiverFlowPathFinder::operator()( infinite_loop_guard != 0; --infinite_loop_guard) { m_pimpl->grow_path(vertex2, final_path_size); if (m_pimpl->path.size() == final_path_size) { - TKET_ASSERT_WITH_THROW(m_pimpl->path.back() == vertex2); + TKET_ASSERT(m_pimpl->path.back() == vertex2); m_pimpl->update_data_with_path(); return m_pimpl->path; } diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp index 9fc1417f31..42773437ce 100644 --- a/tket/src/TokenSwapping/SwapListOptimiser.cpp +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -77,7 +77,7 @@ SwapListOptimiser::get_id_of_previous_blocker(SwapList& list, SwapID id) { break; } } - TKET_ASSERT_WITH_THROW(terminated_correctly); + TKET_ASSERT(terminated_correctly); // It's hit a copy of itself list.erase(id); list.erase(current_id); @@ -85,7 +85,7 @@ SwapListOptimiser::get_id_of_previous_blocker(SwapList& list, SwapID id) { } bool SwapListOptimiser::move_swap_towards_front(SwapList& list, SwapID id) { - TKET_ASSERT_WITH_THROW(list.front_id()); + TKET_ASSERT(list.front_id()); if (id == list.front_id().value()) { return false; } @@ -166,7 +166,7 @@ void SwapListOptimiser::optimise_pass_with_zero_travel(SwapList& list) { } current_id = next_id_opt.value(); } - TKET_ASSERT_WITH_THROW(!"optimise_pass_with_zero_travel termination"); + TKET_ASSERT(!"optimise_pass_with_zero_travel termination"); } void SwapListOptimiser::optimise_pass_with_frontward_travel(SwapList& list) { @@ -186,7 +186,7 @@ void SwapListOptimiser::optimise_pass_with_frontward_travel(SwapList& list) { } current_id = next_id_opt.value(); } - TKET_ASSERT_WITH_THROW(!"optimise_pass_with_frontward_travel termination"); + TKET_ASSERT(!"optimise_pass_with_frontward_travel termination"); } void SwapListOptimiser::optimise_pass_with_token_tracking(SwapList& list) { @@ -261,14 +261,14 @@ void SwapListOptimiser:: } current_id = next_id_opt.value(); } - TKET_ASSERT_WITH_THROW(terminated_correctly); + TKET_ASSERT(terminated_correctly); const auto new_size = list.size(); if (old_size == new_size) { return; } - TKET_ASSERT_WITH_THROW(new_size < old_size); + TKET_ASSERT(new_size < old_size); } - TKET_ASSERT_WITH_THROW(!"optimise_pass_with_token_tracking termination"); + TKET_ASSERT(!"optimise_pass_with_token_tracking termination"); } void SwapListOptimiser::full_optimise(SwapList& list) { @@ -288,9 +288,9 @@ void SwapListOptimiser::full_optimise( if (old_size == list.size() || list.size() == 0) { return; } - TKET_ASSERT_WITH_THROW(list.size() < old_size); + TKET_ASSERT(list.size() < old_size); } - TKET_ASSERT_WITH_THROW(!"full_optimise termination"); + TKET_ASSERT(!"full_optimise termination"); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp index 
4546b7ed40..3566cafd4c 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -24,7 +24,7 @@ namespace tsa_internal { std::set get_random_set( RNG& rng, size_t sample_size, size_t population_size) { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( sample_size <= population_size || !"get_random_set: sample too large"); std::set result; @@ -46,7 +46,7 @@ std::set get_random_set( return result; } } - TKET_ASSERT_WITH_THROW(!"get_random_set: dropped out of loop"); + TKET_ASSERT(!"get_random_set: dropped out of loop"); return result; } diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index 5f1a83861c..e60ab2c213 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -36,7 +36,7 @@ void check_mapping( const VertexMapping& vertex_mapping, VertexMapping& work_mapping) { work_mapping.clear(); for (const auto& entry : vertex_mapping) { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( work_mapping.count(entry.second) == 0 || AssertMessage() << "Vertices v_" << entry.first << " and v_" << work_mapping[entry.second] @@ -79,7 +79,7 @@ size_t get_source_vertex( return entry.first; } } - TKET_ASSERT_WITH_THROW(!"get_source_vertex"); + TKET_ASSERT(!"get_source_vertex"); return target_vertex; } diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp index f676a35a55..e26a60b976 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -46,8 +46,8 @@ const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( return m_result; } // If not the identity, at least 2 vertices moved. - TKET_ASSERT_WITH_THROW(desired_mapping.size() >= 2); - TKET_ASSERT_WITH_THROW(desired_mapping.size() <= 6); + TKET_ASSERT(desired_mapping.size() >= 2); + TKET_ASSERT(desired_mapping.size() <= 6); m_desired_mapping = desired_mapping; unsigned next_cyc_index = 0; @@ -63,14 +63,14 @@ const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( infinite_loop_guard != 0; --infinite_loop_guard) { const auto curr_v = this_cycle.back(); const auto target_v = m_desired_mapping.at(curr_v); - TKET_ASSERT_WITH_THROW(m_desired_mapping.erase(curr_v) == 1); + TKET_ASSERT(m_desired_mapping.erase(curr_v) == 1); if (target_v == this_cycle[0]) { terminated_correctly = true; break; } this_cycle.push_back(target_v); } - TKET_ASSERT_WITH_THROW(terminated_correctly); + TKET_ASSERT(terminated_correctly); } // Sort by cycle length, LONGEST cycles first. 
// But, also want a "stable-like" sort: @@ -98,18 +98,18 @@ const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( m_result.new_to_old_vertices.clear(); for (auto ii : m_sorted_cycles_indices) { const auto& cyc = m_cycles[ii]; - TKET_ASSERT_WITH_THROW(!cyc.empty()); - TKET_ASSERT_WITH_THROW(cyc.size() <= 6); + TKET_ASSERT(!cyc.empty()); + TKET_ASSERT(cyc.size() <= 6); for (size_t old_v : cyc) { m_result.new_to_old_vertices.push_back(old_v); } } - TKET_ASSERT_WITH_THROW(m_result.new_to_old_vertices.size() <= 6); + TKET_ASSERT(m_result.new_to_old_vertices.size() <= 6); m_result.old_to_new_vertices.clear(); for (unsigned ii = 0; ii < m_result.new_to_old_vertices.size(); ++ii) { m_result.old_to_new_vertices[m_result.new_to_old_vertices[ii]] = ii; } - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( m_result.new_to_old_vertices.size() == m_result.old_to_new_vertices.size()); diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp index d31a74c9e8..d6c5f96c07 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -62,11 +62,11 @@ ExactMappingLookup::improve_upon_existing_result( } return m_result; } - TKET_ASSERT_WITH_THROW(relabelling.permutation_hash != 0); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT(relabelling.permutation_hash != 0); + TKET_ASSERT( relabelling.new_to_old_vertices.size() == relabelling.old_to_new_vertices.size()); - TKET_ASSERT_WITH_THROW(relabelling.new_to_old_vertices.size() >= 2); + TKET_ASSERT(relabelling.new_to_old_vertices.size() >= 2); fill_result_from_table(relabelling, edges, max_number_of_swaps); return m_result; @@ -102,8 +102,8 @@ void ExactMappingLookup::fill_result_from_table( } const auto new_v1 = new_v1_opt.value(); const auto new_v2 = new_v2_opt.value(); - TKET_ASSERT_WITH_THROW(new_v1 <= 5); - TKET_ASSERT_WITH_THROW(new_v2 <= 5); + TKET_ASSERT(new_v1 <= 5); + TKET_ASSERT(new_v2 <= 5); new_edges_bitset |= SwapConversion::get_edges_bitset( SwapConversion::get_hash_from_swap(get_swap(new_v1, new_v2))); } @@ -112,13 +112,13 @@ void ExactMappingLookup::fill_result_from_table( relabelling_result.permutation_hash, new_edges_bitset, max_number_of_swaps); - TKET_ASSERT_WITH_THROW(table_result.number_of_swaps > 0); + TKET_ASSERT(table_result.number_of_swaps > 0); if (table_result.number_of_swaps > max_number_of_swaps) { // No result in the table. return; } - TKET_ASSERT_WITH_THROW(table_result.edges_bitset != 0); - TKET_ASSERT_WITH_THROW(table_result.swaps_code > 0); + TKET_ASSERT(table_result.edges_bitset != 0); + TKET_ASSERT(table_result.swaps_code > 0); m_result.success = true; m_result.swaps.clear(); @@ -131,7 +131,7 @@ void ExactMappingLookup::fill_result_from_table( relabelling_result.new_to_old_vertices.at(new_swap.first), relabelling_result.new_to_old_vertices.at(new_swap.second))); } - TKET_ASSERT_WITH_THROW(m_result.swaps.size() <= 16); + TKET_ASSERT(m_result.swaps.size() <= 16); } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp index f9ae1b0655..39fb0ccbdc 100644 --- a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -85,10 +85,10 @@ through all entries. void FilteredSwapSequences::initialise( std::vector codes) { // Can only initialise once. 
- TKET_ASSERT_WITH_THROW(m_internal_data.empty()); + TKET_ASSERT(m_internal_data.empty()); std::sort(codes.begin(), codes.end()); - TKET_ASSERT_WITH_THROW(!codes.empty()); - TKET_ASSERT_WITH_THROW(codes[0] != 0); + TKET_ASSERT(!codes.empty()); + TKET_ASSERT(codes[0] != 0); TrimmedSingleSequenceData datum; for (size_t ii = 0; ii < codes.size(); ++ii) { @@ -104,7 +104,7 @@ void FilteredSwapSequences::initialise( void FilteredSwapSequences::push_back(TrimmedSingleSequenceData datum) { auto bitset_copy = datum.edges_bitset; - TKET_ASSERT_WITH_THROW(bitset_copy != 0); + TKET_ASSERT(bitset_copy != 0); SwapConversion::EdgesBitset bit_to_use = 0; // We want to add to the smallest list, to keep the data balanced. @@ -135,7 +135,7 @@ void FilteredSwapSequences::push_back(TrimmedSingleSequenceData datum) { } } } - TKET_ASSERT_WITH_THROW(bit_to_use != 0); + TKET_ASSERT(bit_to_use != 0); m_internal_data[bit_to_use].push_back(datum); } @@ -230,9 +230,9 @@ construct_and_return_full_table() { // The simplest nontrivial permutation arises from a single swap (a,b), // which under the canonical relabelling is converted to (01), // which has hash 2. - TKET_ASSERT_WITH_THROW(entry.first >= 2); + TKET_ASSERT(entry.first >= 2); // The largest possible hash comes from (01)(23)(45). - TKET_ASSERT_WITH_THROW(entry.first <= 222); + TKET_ASSERT(entry.first <= 222); result[entry.first].initialise(entry.second); } return result; diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp index 2ff95a50d4..891e80770a 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -54,7 +54,7 @@ const ExactMappingLookup::Result& PartialMappingLookup::operator()( // For next_permutation, let's permute the empty SOURCE vertices. // They are already sorted, thus already at the first permutation // in the ordering, because they came from the keys of desired_mapping. 
- TKET_ASSERT_WITH_THROW(std::next_permutation( + TKET_ASSERT(std::next_permutation( m_empty_source_vertices.begin(), m_empty_source_vertices.end())); m_altered_mapping = desired_mapping; diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp index 76770b9a32..0c8d8ad3ad 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -28,7 +28,7 @@ static vector get_swaps_fixed_vector() { swaps.push_back(get_swap(ii, jj)); } } - TKET_ASSERT_WITH_THROW(swaps.size() == 15); + TKET_ASSERT(swaps.size() == 15); return swaps; } @@ -67,8 +67,8 @@ unsigned SwapConversion::get_number_of_swaps( ++num_swaps; const auto swap_hash = swaps_code & 0xF; swaps_code >>= 4; - TKET_ASSERT_WITH_THROW(swap_hash > 0); - TKET_ASSERT_WITH_THROW(swap_hash <= 15); + TKET_ASSERT(swap_hash > 0); + TKET_ASSERT(swap_hash <= 15); } return num_swaps; } @@ -78,7 +78,7 @@ SwapConversion::EdgesBitset SwapConversion::get_edges_bitset( EdgesBitset edges_bitset = 0; while (swaps_code != 0) { const auto swap_hash = swaps_code & 0xF; - TKET_ASSERT_WITH_THROW(swap_hash > 0); + TKET_ASSERT(swap_hash > 0); edges_bitset |= (1u << (swap_hash - 1)); swaps_code >>= 4; } diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index 84672bd153..9fa6286e5a 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -79,12 +79,11 @@ SwapListSegmentOptimiser::optimise_segment( bool should_store = m_output.initial_segment_size == 0; if (!should_store) { // Something IS stored, but is our new solution better? 
- TKET_ASSERT_WITH_THROW( + TKET_ASSERT( m_output.initial_segment_size >= m_best_optimised_swaps.size()); const size_t current_decrease = m_output.initial_segment_size - m_best_optimised_swaps.size(); - TKET_ASSERT_WITH_THROW( - current_number_of_swaps >= lookup_result.swaps.size()); + TKET_ASSERT(current_number_of_swaps >= lookup_result.swaps.size()); const size_t new_decrease = current_number_of_swaps - lookup_result.swaps.size(); should_store = new_decrease > current_decrease; @@ -140,8 +139,7 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( return; } m_output.final_segment_size = m_best_optimised_swaps.size(); - TKET_ASSERT_WITH_THROW( - m_output.final_segment_size <= m_output.initial_segment_size); + TKET_ASSERT(m_output.final_segment_size <= m_output.initial_segment_size); const auto initial_size = swap_list.size(); if (m_best_optimised_swaps.empty()) { @@ -152,7 +150,7 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( initial_id, m_best_optimised_swaps.cbegin(), m_best_optimised_swaps.cend()); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( overwrite_result.number_of_overwritten_elements == m_best_optimised_swaps.size()); m_output.new_segment_last_id = @@ -168,7 +166,7 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( next_id_opt.value(), remaining_elements_to_erase); } } - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( swap_list.size() + m_output.initial_segment_size == initial_size + m_output.final_segment_size); } diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp index 16e36023d1..6478942473 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -69,12 +69,12 @@ static bool erase_empty_swaps_interval( case EmptySwapCheckResult::TERMINATE_AFTER_ERASURE: return false; default: - TKET_ASSERT_WITH_THROW(!"unknown EmptySwapCheckResult enum"); + TKET_ASSERT(!"unknown EmptySwapCheckResult enum"); break; } } // Should never get here! - TKET_ASSERT_WITH_THROW(!"erase_empty_swaps_interval falied to terminate"); + TKET_ASSERT(!"erase_empty_swaps_interval falied to terminate"); return false; } @@ -89,16 +89,16 @@ static bool perform_current_nonempty_swap( if (vertices_with_tokens.count(swap.first) == 0) { // No empty swaps! - TKET_ASSERT_WITH_THROW(vertices_with_tokens.count(swap.second) != 0); + TKET_ASSERT(vertices_with_tokens.count(swap.second) != 0); // Second has a token, first doesn't. - TKET_ASSERT_WITH_THROW(vertices_with_tokens.insert(swap.first).second); - TKET_ASSERT_WITH_THROW(vertices_with_tokens.erase(swap.second) == 1); + TKET_ASSERT(vertices_with_tokens.insert(swap.first).second); + TKET_ASSERT(vertices_with_tokens.erase(swap.second) == 1); } else { // First has a token. if (vertices_with_tokens.count(swap.second) == 0) { // Second has no token. - TKET_ASSERT_WITH_THROW(vertices_with_tokens.erase(swap.first) == 1); - TKET_ASSERT_WITH_THROW(vertices_with_tokens.insert(swap.second).second); + TKET_ASSERT(vertices_with_tokens.erase(swap.first) == 1); + TKET_ASSERT(vertices_with_tokens.insert(swap.second).second); } } @@ -142,7 +142,7 @@ void SwapListTableOptimiser::optimise( break; } } - TKET_ASSERT_WITH_THROW(terminated_correctly); + TKET_ASSERT(terminated_correctly); if (swap_list.size() <= 1) { return; } @@ -163,12 +163,12 @@ void SwapListTableOptimiser::optimise( // Must reverse again to get back to start! 
swap_list.reverse(); const auto new_size = swap_list.size(); - TKET_ASSERT_WITH_THROW(new_size <= old_size); + TKET_ASSERT(new_size <= old_size); if (new_size == old_size) { return; } } - TKET_ASSERT_WITH_THROW(!"SwapListTableOptimiser::optimise"); + TKET_ASSERT(!"SwapListTableOptimiser::optimise"); } void SwapListTableOptimiser::optimise_in_forward_direction( diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp index ecb434b566..7060995b1b 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -59,9 +59,9 @@ const VertexMapResizing::Result& VertexMapResizing::resize_mapping( return m_result; } } - TKET_ASSERT_WITH_THROW(!"VertexMapResizing::resize_mapping"); + TKET_ASSERT(!"VertexMapResizing::resize_mapping"); } - TKET_ASSERT_WITH_THROW(mapping.size() <= desired_size); + TKET_ASSERT(mapping.size() <= desired_size); bool terminated_correctly = false; for (auto infinite_loop_guard = 1 + desired_size; infinite_loop_guard > 0; --infinite_loop_guard) { @@ -78,9 +78,9 @@ const VertexMapResizing::Result& VertexMapResizing::resize_mapping( break; } // Must have added exactly one vertex. - TKET_ASSERT_WITH_THROW(old_size + 1 == new_size); + TKET_ASSERT(old_size + 1 == new_size); } - TKET_ASSERT_WITH_THROW(terminated_correctly); + TKET_ASSERT(terminated_correctly); // It's acceptable to have too few vertices, // it can still be looked up in the table. m_result.success = true; @@ -148,8 +148,8 @@ void VertexMapResizing::remove_vertex(VertexMapping& mapping) { } } if (minimum_edges_removed < invalid_number_of_edges) { - TKET_ASSERT_WITH_THROW(mapping.at(best_vertex) == best_vertex); - TKET_ASSERT_WITH_THROW(mapping.erase(best_vertex) == 1); + TKET_ASSERT(mapping.at(best_vertex) == best_vertex); + TKET_ASSERT(mapping.erase(best_vertex) == 1); } } diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp index a76e47f29d..cbe621b432 100644 --- a/tket/src/TokenSwapping/TrivialTSA.cpp +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -69,7 +69,7 @@ bool TrivialTSA::grow_cycle_forwards( current_id = m_abstract_cycles_vertices.insert_after(current_id); m_abstract_cycles_vertices.at(current_id) = citer->second; } - TKET_ASSERT_WITH_THROW(!"TrivialTSA::grow_cycle_forwards: " + TKET_ASSERT(!"TrivialTSA::grow_cycle_forwards: " "hit vertex count limit; invalid vertex mapping"); return false; } @@ -93,7 +93,7 @@ void TrivialTSA::grow_cycle_backwards(Endpoints& endpoints) { current_id = m_abstract_cycles_vertices.insert_before(current_id); m_abstract_cycles_vertices.at(current_id) = citer->second; } - TKET_ASSERT_WITH_THROW(!"TrivialTSA::grow_cycle_backwards: " + TKET_ASSERT(!"TrivialTSA::grow_cycle_backwards: " "hit vertex count limit; invalid vertex mapping"); } @@ -103,21 +103,20 @@ void TrivialTSA::do_final_checks() const { m_vertices_seen.insert(entry.first); m_vertices_seen.insert(entry.second); } - TKET_ASSERT_WITH_THROW( - m_vertices_seen.size() == m_abstract_cycles_vertices.size()); + TKET_ASSERT(m_vertices_seen.size() == m_abstract_cycles_vertices.size()); // Erase them again...! 
for (const auto& endpoints : m_cycle_endpoints) { for (auto id = endpoints.first;; id = m_abstract_cycles_vertices.next(id).value()) { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( m_vertices_seen.erase(m_abstract_cycles_vertices.at(id)) == 1); if (id == endpoints.second) { break; } } } - TKET_ASSERT_WITH_THROW(m_vertices_seen.empty()); + TKET_ASSERT(m_vertices_seen.empty()); } void TrivialTSA::fill_disjoint_abstract_cycles( @@ -143,7 +142,7 @@ void TrivialTSA::fill_disjoint_abstract_cycles( // Now, add the vertices to vertices seen... for (auto id = endpoints.first;; id = m_abstract_cycles_vertices.next(id).value()) { - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( m_vertices_seen.insert(m_abstract_cycles_vertices.at(id)).second); if (id == endpoints.second) { break; @@ -178,7 +177,7 @@ void TrivialTSA::append_partial_solution( append_partial_solution_with_all_cycles(swaps, vertex_mapping, path_finder); return; } - TKET_ASSERT_WITH_THROW(m_options == Options::BREAK_AFTER_PROGRESS); + TKET_ASSERT(m_options == Options::BREAK_AFTER_PROGRESS); // We're only going to do ONE cycle; so find which cycle // has the shortest estimated number of swaps size_t best_estimated_concrete_swaps = std::numeric_limits::max(); @@ -188,28 +187,27 @@ void TrivialTSA::append_partial_solution( for (const auto& endpoints : m_cycle_endpoints) { copy_vertices_to_work_vector(endpoints); if (m_vertices_work_vector.size() < 2) { - TKET_ASSERT_WITH_THROW(m_vertices_work_vector.size() == 1); + TKET_ASSERT(m_vertices_work_vector.size() == 1); continue; } const CyclicShiftCostEstimate estimate(m_vertices_work_vector, distances); - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( estimate.estimated_concrete_swaps < std::numeric_limits::max()); - TKET_ASSERT_WITH_THROW( - estimate.start_v_index < m_vertices_work_vector.size()); + TKET_ASSERT(estimate.start_v_index < m_vertices_work_vector.size()); if (estimate.estimated_concrete_swaps < best_estimated_concrete_swaps) { best_estimated_concrete_swaps = estimate.estimated_concrete_swaps; start_v_index = estimate.start_v_index; best_endpoints = endpoints; } } - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( best_estimated_concrete_swaps < std::numeric_limits::max()); const auto swap_size_before = swaps.size(); const auto decrease = append_partial_solution_with_single_cycle( best_endpoints, start_v_index, swaps, vertex_mapping, distances, path_finder); - TKET_ASSERT_WITH_THROW(swap_size_before < swaps.size()); - TKET_ASSERT_WITH_THROW(decrease > 0); + TKET_ASSERT(swap_size_before < swaps.size()); + TKET_ASSERT(decrease > 0); } void TrivialTSA::copy_vertices_to_work_vector(const Endpoints& endpoints) { @@ -238,9 +236,9 @@ void TrivialTSA::append_partial_solution_with_all_cycles( // Abstract swap(v1, v2). 
const auto v1 = m_vertices_work_vector[ii]; const auto v2 = m_vertices_work_vector[ii - 1]; - TKET_ASSERT_WITH_THROW(v1 != v2); + TKET_ASSERT(v1 != v2); const auto& path = path_finder(v1, v2); - TKET_ASSERT_WITH_THROW(path.size() >= 2); + TKET_ASSERT(path.size() >= 2); append_swaps_to_interchange_path_ends(path, vertex_mapping, swaps); } } @@ -251,8 +249,8 @@ size_t TrivialTSA::append_partial_solution_with_single_cycle( VertexMapping& vertex_mapping, DistancesInterface& distances, PathFinderInterface& path_finder) { copy_vertices_to_work_vector(endpoints); - TKET_ASSERT_WITH_THROW(m_vertices_work_vector.size() >= 2); - TKET_ASSERT_WITH_THROW(start_v_index < m_vertices_work_vector.size()); + TKET_ASSERT(m_vertices_work_vector.size() >= 2); + TKET_ASSERT(start_v_index < m_vertices_work_vector.size()); // Can go negative! But MUST be >= 1 at the end // (otherwise this cycle was useless and should never have occurred). @@ -268,9 +266,9 @@ size_t TrivialTSA::append_partial_solution_with_single_cycle( const auto v2 = m_vertices_work_vector [((ii - 1) + start_v_index) % m_vertices_work_vector.size()]; - TKET_ASSERT_WITH_THROW(v1 != v2); + TKET_ASSERT(v1 != v2); const auto& path = path_finder(v1, v2); - TKET_ASSERT_WITH_THROW(path.size() >= 2); + TKET_ASSERT(path.size() >= 2); // e.g., to swap endpoints: [x,a,b,c,y] -> [y,a,b,c,x], // do concrete swaps xa ab bc cy bc ab xa. @@ -298,8 +296,7 @@ size_t TrivialTSA::append_partial_solution_with_single_cycle( } // The cycle MUST have decreased L overall, // otherwise we shouldn't have done it. - TKET_ASSERT_WITH_THROW( - !"TrivialTSA::append_partial_solution_with_single_cycle"); + TKET_ASSERT(!"TrivialTSA::append_partial_solution_with_single_cycle"); return 0; } diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index d41bd78cec..a0f3e350e9 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -37,10 +37,10 @@ VectorListHybridSkeleton::VectorListHybridSkeleton() void VectorListHybridSkeleton::clear() { if (m_links.empty()) { - TKET_ASSERT_WITH_THROW(m_size == 0); - TKET_ASSERT_WITH_THROW(m_front == INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_back == INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_deleted_front == INVALID_INDEX); + TKET_ASSERT(m_size == 0); + TKET_ASSERT(m_front == INVALID_INDEX); + TKET_ASSERT(m_back == INVALID_INDEX); + TKET_ASSERT(m_deleted_front == INVALID_INDEX); return; } m_size = 0; @@ -61,13 +61,13 @@ void VectorListHybridSkeleton::clear() { void VectorListHybridSkeleton::fast_clear() { if (m_back == INVALID_INDEX) { // No elements stored currently; nothing to do. - TKET_ASSERT_WITH_THROW(m_size == 0); - TKET_ASSERT_WITH_THROW(m_front == INVALID_INDEX); + TKET_ASSERT(m_size == 0); + TKET_ASSERT(m_front == INVALID_INDEX); return; } - TKET_ASSERT_WITH_THROW(m_size > 0); - TKET_ASSERT_WITH_THROW(m_front != INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_links[m_back].next == INVALID_INDEX); + TKET_ASSERT(m_size > 0); + TKET_ASSERT(m_front != INVALID_INDEX); + TKET_ASSERT(m_links[m_back].next == INVALID_INDEX); // There are some existing elements. // Recall that deleted elements are ONLY a forward list, // so we don't need to update "previous". @@ -90,9 +90,9 @@ void VectorListHybridSkeleton::reverse() { // Nothing to do. 
return; } - TKET_ASSERT_WITH_THROW(m_front != INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_back != INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_front != m_back); + TKET_ASSERT(m_front != INVALID_INDEX); + TKET_ASSERT(m_back != INVALID_INDEX); + TKET_ASSERT(m_front != m_back); // The deleted element links don't need to change. { auto current_index = m_front; @@ -103,13 +103,13 @@ void VectorListHybridSkeleton::reverse() { const auto next_index = link.next; std::swap(link.next, link.previous); if (next_index >= m_links.size()) { - TKET_ASSERT_WITH_THROW(next_index == INVALID_INDEX); + TKET_ASSERT(next_index == INVALID_INDEX); terminated_correctly = true; break; } current_index = next_index; } - TKET_ASSERT_WITH_THROW(terminated_correctly); + TKET_ASSERT(terminated_correctly); } std::swap(m_front, m_back); } @@ -161,7 +161,7 @@ void VectorListHybridSkeleton::erase_interval( for (size_t nn = 1; nn < number_of_elements; ++nn) { last_element_index = m_links.at(last_element_index).next; - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( last_element_index < m_links.size() || AssertMessage() << "VectorListHybridSkeleton::erase_interval with start index " @@ -169,7 +169,7 @@ void VectorListHybridSkeleton::erase_interval( << ", size " << m_links.size() << ", run out of elements at N=" << nn << " (got index " << last_element_index << ")"); } - TKET_ASSERT_WITH_THROW(number_of_elements <= m_size); + TKET_ASSERT(number_of_elements <= m_size); m_size -= number_of_elements; // Now, splice the soon-to-be-logically-erased interval into the deleted @@ -188,14 +188,14 @@ void VectorListHybridSkeleton::erase_interval( if (index_of_node_before_interval < m_links.size()) { // There IS a previous node to be dealt with. auto& next_node_index_ref = m_links[index_of_node_before_interval].next; - TKET_ASSERT_WITH_THROW(next_node_index_ref == index); + TKET_ASSERT(next_node_index_ref == index); // This is correct even if index_of_node_after_interval is INVALID_INDEX. next_node_index_ref = index_of_node_after_interval; - TKET_ASSERT_WITH_THROW(m_front != index); + TKET_ASSERT(m_front != index); } else { // No previous node, we must have been at the start already. - TKET_ASSERT_WITH_THROW(index_of_node_before_interval == INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_front == index); + TKET_ASSERT(index_of_node_before_interval == INVALID_INDEX); + TKET_ASSERT(m_front == index); m_front = index_of_node_after_interval; } // Link the node AFTER the interval to the new previous node. @@ -203,24 +203,24 @@ void VectorListHybridSkeleton::erase_interval( // There are more unerased elements after the interval, // so the first one must be dealt with. auto& prev_node_index = m_links[index_of_node_after_interval].previous; - TKET_ASSERT_WITH_THROW(prev_node_index == last_element_index); + TKET_ASSERT(prev_node_index == last_element_index); // Correct even if there IS no node before the interval. prev_node_index = index_of_node_before_interval; - TKET_ASSERT_WITH_THROW(m_back != last_element_index); + TKET_ASSERT(m_back != last_element_index); } else { // No node after, we have erased up to the back. 
- TKET_ASSERT_WITH_THROW(index_of_node_after_interval == INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_back == last_element_index); + TKET_ASSERT(index_of_node_after_interval == INVALID_INDEX); + TKET_ASSERT(m_back == last_element_index); m_back = index_of_node_before_interval; } if (m_size == 0) { - TKET_ASSERT_WITH_THROW(m_front == INVALID_INDEX); - TKET_ASSERT_WITH_THROW(m_back == INVALID_INDEX); + TKET_ASSERT(m_front == INVALID_INDEX); + TKET_ASSERT(m_back == INVALID_INDEX); } else { - TKET_ASSERT_WITH_THROW(m_front < m_links.size()); - TKET_ASSERT_WITH_THROW(m_back < m_links.size()); + TKET_ASSERT(m_front < m_links.size()); + TKET_ASSERT(m_back < m_links.size()); if (m_size == 1) { - TKET_ASSERT_WITH_THROW(m_front == m_back); + TKET_ASSERT(m_front == m_back); } } } diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp index f09e2b9fae..3b3f4a9bce 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -472,13 +472,13 @@ OverwriteIntervalResult VectorListHybrid::overwrite_interval( OverwriteIntervalResult result; result.final_overwritten_element_id = id; CIter citer = new_elements_cbegin; - TKET_ASSERT_WITH_THROW(citer != new_elements_cend); + TKET_ASSERT(citer != new_elements_cend); const auto max_number_of_elements = m_links_data.size(); result.number_of_overwritten_elements = 0; for (;;) { m_data.at(result.final_overwritten_element_id) = *citer; ++result.number_of_overwritten_elements; - TKET_ASSERT_WITH_THROW( + TKET_ASSERT( result.number_of_overwritten_elements <= max_number_of_elements); ++citer; if (citer == new_elements_cend) { @@ -489,7 +489,7 @@ OverwriteIntervalResult VectorListHybrid::overwrite_interval( m_links_data.next(result.final_overwritten_element_id); } // Should be impossible to reach here - TKET_ASSERT_WITH_THROW(!"VectorListHybrid::overwrite_interval"); + TKET_ASSERT(!"VectorListHybrid::overwrite_interval"); return result; } diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp index 2ed9ba9e59..7390632a06 100644 --- a/tket/src/TokenSwapping/main_entry_functions.cpp +++ b/tket/src/TokenSwapping/main_entry_functions.cpp @@ -47,7 +47,7 @@ std::vector> get_swaps( vertex_mapping[arch_mapping.get_vertex(node_entry.first)] = arch_mapping.get_vertex(node_entry.second); } - TKET_ASSERT_WITH_THROW(vertex_mapping.size() == node_mapping.size()); + TKET_ASSERT(vertex_mapping.size() == node_mapping.size()); check_mapping(vertex_mapping); SwapList raw_swap_list; diff --git a/tket/src/Utils/AssertMessage.cpp b/tket/src/Utils/AssertMessage.cpp index 3bd65b285c..465740de1b 100644 --- a/tket/src/Utils/AssertMessage.cpp +++ b/tket/src/Utils/AssertMessage.cpp @@ -14,8 +14,6 @@ #include "AssertMessage.hpp" -#include - namespace tket { // GCOVR_EXCL_START @@ -25,8 +23,9 @@ std::string AssertMessage::get_error_message() { const auto message = get_error_stream().str(); // Clear the global stream, ready for the next message - // (in the assert with throw variants, we may try/catch - // multiple times). + // (currently this isn't necessary, because tket assert + // immediately aborts; but it may become necessary again in future, + // if we have assert variants with throws and multiple try/catch). 
get_error_stream().str(std::string()); return message; } @@ -37,10 +36,6 @@ std::stringstream& AssertMessage::get_error_stream() { static std::stringstream ss; return ss; } - -void AssertMessage::throw_message(const std::string& str) { - throw std::runtime_error(str); -} // GCOVR_EXCL_STOP } // namespace tket diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index 5715f134a7..45724fc939 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -42,6 +42,13 @@ * the code coverage DOES listen to the start/stop tags and * ignore all the branching. * So, we're happy to have as many "if" statements and branches as we like! + * + * Note: we previously had some exceptions, but it led to + * problems which we could not resolve: + * https://stackoverflow.com/questions/42003783/ + * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 + * Thus, if you want to throw exceptions rather than abort, + * there are unexpected problems like this which need to be overcome somehow. */ #define TKET_ASSERT(b) \ /* GCOVR_EXCL_START */ \ @@ -75,41 +82,3 @@ std::abort(); \ } \ } while (0) /* GCOVR_EXCL_STOP */ - -/** Like TKET_ASSERT, but throws an exception instead of aborting - * if the condition is not satisfied. - * - * Note: this may seem convoluted. That's because the code coverage - * test programme annoyingly adds lots of branches if exceptions are thrown - * explicitly, despite the STOP/START tags telling it to ignore the code. - * See - * - * https://stackoverflow.com/questions/42003783/ - * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 - * - * We tried "hiding" the exceptions from this macro by putting the throws - * inside another function defined elsewhere. That did make some - * difference, but try/catch blocks also seemed to cause extra - * branching problems. - * Thus, we remove all explicit exceptions AND try/catch blocks, - * in the hope that it will cut down on the undesired extra branches. - * Thus, unlike TKET_ASSERT, an exception thrown by the EVALUATION of b - * will not be caught. But this should be very rare, - * AND we're explicitly trying to throw an exception INSTEAD of aborting, - * so this seems not too bad. - */ -#define TKET_ASSERT_WITH_THROW(b) \ - /* GCOVR_EXCL_START */ \ - do { \ - if (!(b)) { \ - std::stringstream msg; \ - msg << "Assertion '" << #b << "' (" << __FILE__ << " : " << __func__ \ - << " : " << __LINE__ << ") failed"; \ - const auto extra_message = tket::AssertMessage::get_error_message(); \ - if (!extra_message.empty()) { \ - msg << ": '" << extra_message << "'"; \ - } \ - msg << "."; \ - tket::AssertMessage::throw_message(msg.str()); \ - } \ - } while (0) /* GCOVR_EXCL_STOP */ diff --git a/tket/src/Utils/include/Utils/AssertMessage.hpp b/tket/src/Utils/include/Utils/AssertMessage.hpp index 39b6fdc25b..38779fc838 100644 --- a/tket/src/Utils/include/Utils/AssertMessage.hpp +++ b/tket/src/Utils/include/Utils/AssertMessage.hpp @@ -21,30 +21,27 @@ namespace tket { // GCOVR_EXCL_START /** This is only for use with TKET_ASSERT, when we want to give a more detailed * error message than just the assertion code and location. 
- * Also, some code might seem rather strange, but that's because exceptions + * Also, some code might seem strange, but that's because exceptions * can generate many extra branches in test coverage, see * * https://stackoverflow.com/questions/42003783/ * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 * - * We want to hide the throws (or at least, have one single throw), - * and also provide a stringstream to avoid having to construct one - * every time the assert is checked (asserts must have - * almost zero performance impact if they are not triggered). + * Thus, we avoid exceptions. */ class AssertMessage { public: /** Construct the object, to begin writing to the stream. */ AssertMessage(); - /** Always returns false, so that "|| AssertMessage() << a)" - * becomes "|| false)". + /** Always returns false, so that "... || AssertMessage() << a)" + * becomes "... || false)". */ operator bool() const; /** Every streamable object x can be written to the stream. */ template - AssertMessage& operator<<(const T& x) { + const AssertMessage& operator<<(const T& x) const { get_error_stream() << x; return *this; } @@ -55,11 +52,6 @@ class AssertMessage { */ static std::string get_error_message(); - /** Simply throws a std::runtime_error with the given message; - * hopefully this will fool the test coverage programme. - */ - static void throw_message(const std::string& str); - private: /** Previously the error message for later use by TKET_ASSERT macros * was passed on by exceptions within operator bool(), but that diff --git a/tket/tests/Utils/test_TketAssertWithThrow.cpp b/tket/tests/Utils/test_TketAssertWithThrow.cpp deleted file mode 100644 index 1c9c2f5c6d..0000000000 --- a/tket/tests/Utils/test_TketAssertWithThrow.cpp +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include - -#include "Utils/Assert.hpp" - -using Catch::Matchers::Contains; - -// An assert function with abort obviously cannot be tested here; -// but we CAN test assert functions which only throw. -namespace tket { -namespace { -// Just ensure that we have checked every message. -class MessageChecker { - public: - explicit MessageChecker(const std::vector& calc_messages) - : m_ii_count(0), m_calc_messages(calc_messages) {} - - const std::string& get_message(int ii) { - ++m_ii_count; - m_values_of_ii_checked.insert(ii); - return m_calc_messages.at(ii); - } - - void final_checks() const { - CHECK(m_values_of_ii_checked.size() == m_calc_messages.size()); - // the ii should be [0,1,2,...,m]. 
- CHECK(m_values_of_ii_checked.size() == m_ii_count); - CHECK(*m_values_of_ii_checked.cbegin() == 0); - CHECK( - *m_values_of_ii_checked.crbegin() == m_values_of_ii_checked.size() - 1); - } - - private: - unsigned m_ii_count; - const std::vector& m_calc_messages; - std::set m_values_of_ii_checked; -}; - -} // namespace - -static std::vector get_message_indices_with_filename( - const std::vector& messages) { - std::vector indices; - for (unsigned ii = 0; ii < messages.size(); ++ii) { - if (messages[ii].find("test_TketAssertWithThrow.cpp") != - std::string::npos) { - indices.push_back(ii); - } - } - return indices; -} - -static int get_number(int nn) { - if (nn > 15) { - throw std::runtime_error("Error!!"); - } - return nn - 10; -} - -SCENARIO("Simple asserts with throws") { - std::vector calc_messages; - std::vector values_of_nn_with_error; - - for (int nn = 0; nn <= 20; ++nn) { - try { - // Should throw for nn in [3,5] - TKET_ASSERT_WITH_THROW((nn - 3) * (nn - 5) > 0); - - // Should throw for nn in [8,10] - TKET_ASSERT_WITH_THROW( - (nn - 8) * (nn - 10) > 0 || AssertMessage() << "N=" << nn); - - // Should throw for [16,20] (the function throws). - TKET_ASSERT_WITH_THROW(get_number(nn) < 20); - } catch (const std::exception& e) { - values_of_nn_with_error.push_back(nn); - std::stringstream ss; - ss << "CHECK: nn=" << nn << " ; " << e.what(); - calc_messages.emplace_back(ss.str()); - } - } - - CHECK(calc_messages.size() == 11); - CHECK( - get_message_indices_with_filename(calc_messages) == - std::vector{0, 1, 2, 3, 4, 5}); - - MessageChecker checker(calc_messages); - - for (int ii = 0; ii <= 2; ++ii) { - const auto& message = checker.get_message(ii); - CHECK_THAT( - message, - Contains(std::string("CHECK: nn=") + std::to_string(ii + 3) + " ; ")); - CHECK_THAT(message, Contains("Assertion '(nn - 3) * (nn - 5) > 0'")); - } - for (int ii = 3; ii <= 5; ++ii) { - const auto& message = checker.get_message(ii); - const std::string n_value = std::to_string(ii + 5); - CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); - CHECK_THAT( - message, - Contains("Assertion '(nn - 8) * (nn - 10) > 0 || AssertMessage() <<")); - CHECK_THAT(message, Contains("failed:")); - CHECK_THAT(message, Contains(std::string("'N=") + n_value + "'")); - } - for (int ii = 6; ii <= 10; ++ii) { - const auto& message = checker.get_message(ii); - const std::string n_value = std::to_string(ii + 10); - CHECK_THAT( - message, Contains(std::string("CHECK: nn=") + n_value + " ; Error!!")); - // The function "get_number" threw, but this was NOT picked up - // by the macro. Never mind. - CHECK_THAT(message, !Contains("ssertion")); - } - CHECK( - values_of_nn_with_error == - std::vector{3, 4, 5, 8, 9, 10, 16, 17, 18, 19, 20}); - checker.final_checks(); -} - -// Throws for nn in [2,5] or [8,10] with message. -static int get_number_with_asserts(int nn) { - TKET_ASSERT_WITH_THROW((nn - 2) * (nn - 5) > 0); - - TKET_ASSERT_WITH_THROW( - (nn - 8) * (nn - 10) > 0 || AssertMessage() << "N=" << nn << ": second"); - - return nn + 5; -} - -SCENARIO("Asserts with throws within calls") { - std::vector calc_messages; - std::vector values_of_nn_with_error; - for (int nn = 0; nn <= 30; ++nn) { - try { - // Throws for [2,5] or [8,10]. - const int mm = get_number_with_asserts(nn); - - // Throws for mm=15,16, so nn=10,11, - // but NOT for 10 because of the above! So only for nn=11. - TKET_ASSERT_WITH_THROW(!(mm >= 15 && mm <= 16)); - - // Throws for [26,30], since mm=n+5. 
- TKET_ASSERT_WITH_THROW( - mm <= 30 || AssertMessage() << "N=" << nn << ", M=" << mm); - - // Should throw from nn-10, so [12,15] or [18,20] (with message). - TKET_ASSERT_WITH_THROW(get_number_with_asserts(nn - 10) >= nn - 5); - - // Should throw from nn-15, so [17,20] - // (except that [18,20] are covered above, so nn=17 only) - // or [23,25]. - TKET_ASSERT_WITH_THROW( - get_number_with_asserts(nn - 15) >= nn - 10 || - AssertMessage() << "assert with N=" << nn); - } catch (const std::exception& e) { - values_of_nn_with_error.push_back(nn); - std::stringstream ss; - ss << "CHECK: nn=" << nn << " ; " << e.what(); - calc_messages.emplace_back(ss.str()); - } - } - CHECK(calc_messages.size() == 24); - - // Every error is thrown by TKET_ASSERT_WITH_THROW, - // so should have the filename. - CHECK( - get_message_indices_with_filename(calc_messages) == - std::vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}); - - MessageChecker checker(calc_messages); - - for (int ii = 0; ii <= 3; ++ii) { - const auto& message = checker.get_message(ii); - CHECK_THAT( - message, - Contains(std::string("CHECK: nn=") + std::to_string(ii + 2) + " ; ")); - // comes from "get_number_with_asserts" - CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); - // The function name: the assert macro was inside the function. - CHECK_THAT(message, Contains("get_number_with_asserts")); - } - for (int ii = 4; ii <= 6; ++ii) { - const auto& message = checker.get_message(ii); - const auto n_value = std::to_string(ii + 4); - CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); - // comes from "get_number_with_asserts" - CHECK_THAT(message, Contains("Assertion")); - // the function name - CHECK_THAT(message, Contains("get_number_with_asserts")); - CHECK_THAT(message, Contains(std::string("'N=") + n_value + ": second'")); - - // comes from the second assert in the function, without a message. - CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); - } - { - const auto& message = checker.get_message(7); - CHECK_THAT(message, Contains("CHECK: nn=11 ; ")); - CHECK_THAT(message, Contains("Assertion '!(mm >= 15 && mm <= 16)'")); - - CHECK_THAT(message, !Contains("get_number_with_asserts")); - } - for (int ii = 8; ii <= 11; ++ii) { - const auto& message = checker.get_message(ii); - const auto n_value = std::to_string(ii + 4); - CHECK_THAT(message, Contains(std::string("CHECK: nn=") + n_value + " ; ")); - CHECK_THAT(message, Contains("get_number_with_asserts")); - CHECK_THAT(message, Contains("Assertion '(nn - 2) * (nn - 5) > 0'")); - // The throw within "get_number_with_asserts" was not picked up - // by the macro, so the macro code is not present in the error message. 
- CHECK_THAT(message, !Contains("AssertMessage()")); - } - { - const auto& message = checker.get_message(12); - CHECK_THAT( - message, - Contains("CHECK: nn=17 ; Assertion '(nn - 2) * (nn - 5) > 0'")); - } - for (int ii = 13; ii <= 15; ++ii) { - const auto& message = checker.get_message(ii); - CHECK_THAT( - message, - Contains( - std::string("CHECK: nn=") + std::to_string(ii + 5) + - " ; Assertion '(nn - 8) * (nn - 10) > 0 || AssertMessage() <<")); - CHECK_THAT( - message, - Contains(std::string("'N=") + std::to_string(ii - 5) + ": second")); - - CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); - } - for (int ii = 16; ii <= 18; ++ii) { - const auto& message = checker.get_message(ii); - CHECK_THAT( - message, - Contains( - std::string("CHECK: nn=") + std::to_string(ii + 7) + - " ; Assertion '(nn - 8) * (nn - 10) > 0 || AssertMessage() <<")); - CHECK_THAT( - message, - Contains(std::string("'N=") + std::to_string(ii - 8) + ": second")); - - CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); - } - for (int ii = 19; ii <= 23; ++ii) { - const auto& message = checker.get_message(ii); - const auto n_value = std::to_string(ii + 7); - CHECK_THAT( - message, Contains( - std::string("CHECK: nn=") + n_value + - " ; Assertion 'mm <= 30 || AssertMessage() << ")); - CHECK_THAT(message, Contains("Assertion ")); - CHECK_THAT(message, Contains("failed: ")); - CHECK_THAT( - message, Contains("'N=" + n_value + ", M=" + std::to_string(ii + 12))); - - CHECK_THAT(message, !Contains("Evaluating assertion condition")); - CHECK_THAT(message, !Contains("get_number_with_asserts")); - CHECK_THAT(message, !Contains("threw unexpected exception")); - CHECK_THAT(message, !Contains("Assertion()")); - CHECK_THAT(message, !Contains("(nn - 2) * (nn - 5)")); - } - CHECK(values_of_nn_with_error == std::vector{2, 3, 4, 5, 8, 9, - 10, 11, 12, 13, 14, 15, - 17, 18, 19, 20, 23, 24, - 25, 26, 27, 28, 29, 30}); - checker.final_checks(); -} - -SCENARIO("Asserts with various bool conversions") { - // First, list things which do throw. - bool throws = true; - try { - TKET_ASSERT_WITH_THROW(!""); - throws = false; - } catch (const std::exception&) { - } - CHECK(throws); - - throws = true; - try { - TKET_ASSERT_WITH_THROW(0); - throws = false; - } catch (const std::exception&) { - } - CHECK(throws); - - int xx = 1; - try { - // Now, list non-throwing things first. 
- TKET_ASSERT_WITH_THROW(""); - ++xx; - TKET_ASSERT_WITH_THROW("aaaaa"); - ++xx; - TKET_ASSERT_WITH_THROW(xx); - ++xx; - TKET_ASSERT_WITH_THROW(true); - ++xx; - TKET_ASSERT_WITH_THROW(-1); - ++xx; - TKET_ASSERT_WITH_THROW(xx > 0); - ++xx; - // Throws - TKET_ASSERT_WITH_THROW(!"bbbbb"); - xx *= 1000; - } catch (const std::exception&) { - xx *= 100; - } - CHECK(xx == 700); -} - -} // namespace tket diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index b496083faa..4fcdd0481e 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -25,7 +25,6 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/CircuitsForTesting.cpp ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp ${TKET_TESTS_DIR}/Utils/test_CosSinDecomposition.cpp - ${TKET_TESTS_DIR}/Utils/test_TketAssertWithThrow.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp ${TKET_TESTS_DIR}/Graphs/GraphTestingRoutines.cpp From 032f64e2b59ee44eefc3964e37e98969fc6b3555 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 7 Feb 2022 17:56:58 +0000 Subject: [PATCH 046/146] Remove AssertMessage(), add TKET_ASSERT_WITH_MESSAGE --- tket/src/Graphs/AdjacencyData.cpp | 33 +++--- .../src/TokenSwapping/ArchitectureMapping.cpp | 44 ++++---- .../DistancesFromArchitecture.cpp | 17 ++- .../NeighboursFromArchitecture.cpp | 18 ++-- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 24 ++--- .../TSAUtils/VertexMappingFunctions.cpp | 11 +- .../VectorListHybridSkeleton.cpp | 9 +- tket/src/Utils/AssertMessage.cpp | 41 ------- tket/src/Utils/CMakeLists.txt | 1 - tket/src/Utils/include/Utils/Assert.hpp | 100 +++++++++--------- .../src/Utils/include/Utils/AssertMessage.hpp | 67 ------------ 11 files changed, 123 insertions(+), 242 deletions(-) delete mode 100644 tket/src/Utils/AssertMessage.cpp delete mode 100644 tket/src/Utils/include/Utils/AssertMessage.hpp diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index 7589513c6e..56708058ce 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -65,10 +65,9 @@ string AdjacencyData::to_string() const { const set& AdjacencyData::get_neighbours( std::size_t vertex) const { - TKET_ASSERT( - vertex < m_cleaned_data.size() || - AssertMessage() - << "AdjacencyData: get_neighbours called with invalid vertex " + TKET_ASSERT_WITH_MESSAGE( + vertex < m_cleaned_data.size(), + "AdjacencyData: get_neighbours called with invalid vertex " << vertex << "; there are only " << m_cleaned_data.size() << " vertices"); return m_cleaned_data[vertex]; @@ -101,11 +100,11 @@ bool AdjacencyData::add_edge(std::size_t i, std::size_t j) { } bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { - TKET_ASSERT( - (i < m_cleaned_data.size() && j < m_cleaned_data.size()) || - AssertMessage() << "edge_exists called with vertices " << i << ", " << j - << ", but there are only " << m_cleaned_data.size() - << " vertices"); + TKET_ASSERT_WITH_MESSAGE( + (i < m_cleaned_data.size() && j < m_cleaned_data.size()), + "edge_exists called with vertices " + << i << ", " << j << ", but there are only " << m_cleaned_data.size() + << " vertices"); return m_cleaned_data[i].count(j) != 0; } @@ -143,14 +142,14 @@ AdjacencyData::AdjacencyData( for (std::size_t i = 0; i < m_cleaned_data.size(); ++i) { for (std::size_t j : raw_data[i]) { - TKET_ASSERT( - i != j || allow_loops || - AssertMessage() << "vertex " << i << " out of " - << m_cleaned_data.size() << " has a loop."); - TKET_ASSERT( - j < 
m_cleaned_data.size() || - AssertMessage() << "vertex " << i << " has illegal neighbour vertex " - << j << ", the size is " << m_cleaned_data.size()); + TKET_ASSERT_WITH_MESSAGE( + i != j || allow_loops, "Vertex " << i << " out of " + << m_cleaned_data.size() + << " has a loop."); + TKET_ASSERT_WITH_MESSAGE( + j < m_cleaned_data.size(), + "Vertex " << i << " has illegal neighbour vertex " << j + << ", the size is " << m_cleaned_data.size()); m_cleaned_data[i].insert(j); m_cleaned_data[j].insert(i); } diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 4fa444a17c..8f0b9679b5 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -34,10 +34,10 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) const auto& node = m_vertex_to_node_mapping[ii]; { const auto citer = m_node_to_vertex_mapping.find(node); - TKET_ASSERT( - citer == m_node_to_vertex_mapping.cend() || - AssertMessage() << "Duplicate node " << node.repr() << " at vertices " - << citer->second << ", " << ii); + TKET_ASSERT_WITH_MESSAGE( + citer == m_node_to_vertex_mapping.cend(), + "Duplicate node " << node.repr() << " at vertices " << citer->second + << ", " << ii); } m_node_to_vertex_mapping[node] = ii; } @@ -68,19 +68,19 @@ ArchitectureMapping::ArchitectureMapping( // Check that the nodes agree with the architecture object. const auto uids = arch.nodes(); - TKET_ASSERT( - uids.size() == m_vertex_to_node_mapping.size() || - AssertMessage() << "passed in " << edges.size() << " edges, giving " - << m_vertex_to_node_mapping.size() - << " vertices; but the architecture object has " - << uids.size() << " vertices"); + TKET_ASSERT_WITH_MESSAGE( + uids.size() == m_vertex_to_node_mapping.size(), + "passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has " + << uids.size() << " vertices"); for (const UnitID& uid : uids) { const Node node(uid); - TKET_ASSERT( - m_node_to_vertex_mapping.count(node) != 0 || - AssertMessage() - << "passed in " << edges.size() << " edges, giving " + TKET_ASSERT_WITH_MESSAGE( + m_node_to_vertex_mapping.count(node) != 0, + "passed in " + << edges.size() << " edges, giving " << m_vertex_to_node_mapping.size() << " vertices; but the architecture object has an unknown node " << node.repr()); @@ -93,21 +93,19 @@ size_t ArchitectureMapping::number_of_vertices() const { const Node& ArchitectureMapping::get_node(size_t vertex) const { const auto num_vertices = number_of_vertices(); - TKET_ASSERT( - vertex < num_vertices || AssertMessage() - << "get_node: invalid vertex " << vertex - << " (architecture only has " << num_vertices - << " vertices)"); + TKET_ASSERT_WITH_MESSAGE( + vertex < num_vertices, "invalid vertex " << vertex + << " (architecture only has " + << num_vertices << " vertices)"); return m_vertex_to_node_mapping[vertex]; } size_t ArchitectureMapping::get_vertex(const Node& node) const { const auto citer = m_node_to_vertex_mapping.find(node); - TKET_ASSERT( - citer != m_node_to_vertex_mapping.cend() || - AssertMessage() << "get_vertex: node " << node.repr() - << " has no vertex number"); + TKET_ASSERT_WITH_MESSAGE( + citer != m_node_to_vertex_mapping.cend(), + "node " << node.repr() << " has no vertex number"); return citer->second; } diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index 7766f6c9d1..e66f04a191 100644 --- 
a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -74,15 +74,14 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { // architectures, since get_distance now should throw if v1, v2 are in // different connected components. However, leave the check in, in case some // other bizarre error causes distance zero to be returned. - TKET_ASSERT( - distance_entry > 0 || - AssertMessage() << "DistancesFromArchitecture: architecture has " - << arch.n_nodes() << " vertices, " - << arch.n_connections() << " edges; returned diameter " - << arch.get_diameter() << ", but d(" << vertex1 << "," - << vertex2 - << ")=0. " - "Is the graph connected?"); + TKET_ASSERT_WITH_MESSAGE( + distance_entry > 0, + "DistancesFromArchitecture: architecture has " + << arch.n_nodes() << " vertices, " << arch.n_connections() + << " edges; returned diameter " << arch.get_diameter() << " and d(" + << vertex1 << "," << vertex2 + << ")=0. " + "Is the graph connected?"); } return distance_entry; } diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index 8e4ec6f287..f764da2073 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -28,10 +28,10 @@ NeighboursFromArchitecture::NeighboursFromArchitecture( const std::vector& NeighboursFromArchitecture::operator()( size_t vertex) { const auto num_vertices = m_arch_mapping.number_of_vertices(); - TKET_ASSERT( - vertex < num_vertices || - AssertMessage() << "get_neighbours: invalid vertex " << vertex - << " (only have " << num_vertices << " vertices)"); + TKET_ASSERT_WITH_MESSAGE( + vertex < num_vertices, "get_neighbours: invalid vertex " + << vertex << " (only have " << num_vertices + << " vertices)"); auto& neighbours = m_cached_neighbours[vertex]; if (!neighbours.empty()) { @@ -50,11 +50,11 @@ const std::vector& NeighboursFromArchitecture::operator()( for (const Node& node : neighbour_nodes) { const auto neighbour_vertex = m_arch_mapping.get_vertex(node); - TKET_ASSERT( - neighbour_vertex != vertex || - AssertMessage() - << "get_neighbours: vertex " << vertex << " for node " - << node.repr() << " has " << neighbour_nodes.size() + TKET_ASSERT_WITH_MESSAGE( + neighbour_vertex != vertex, + "get_neighbours: vertex " + << vertex << " for node " << node.repr() << " has " + << neighbour_nodes.size() << " neighbours, and lists itself as a neighbour (loops not " "allowed)"); neighbours.push_back(neighbour_vertex); diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index 5be5d43740..4042dd8add 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -114,20 +114,20 @@ void RiverFlowPathFinder::Impl::grow_path( candidate_moves.back().count = edge_count; continue; } - TKET_ASSERT( + TKET_ASSERT_WITH_MESSAGE( neighbour_distance_to_target == remaining_distance || - neighbour_distance_to_target == remaining_distance + 1 || - AssertMessage() << "d(v_" << path.back() << ", v_" << target_vertex - << ")=" << remaining_distance << ". But v_" - << path.back() << " has neighbour v_" << neighbour - << ", at distance " << neighbour_distance_to_target - << " to the target v_" << target_vertex); + neighbour_distance_to_target == remaining_distance + 1, + "d(v_" << path.back() << ", v_" << target_vertex + << ")=" << remaining_distance << ". 
But v_" << path.back() + << " has neighbour v_" << neighbour << ", at distance " + << neighbour_distance_to_target << " to the target v_" + << target_vertex); } - TKET_ASSERT( - !candidate_moves.empty() || - AssertMessage() << "No neighbours of v_" << path.back() - << " at correct distance " << remaining_distance - 1 - << " to target vertex v_" << target_vertex); + TKET_ASSERT_WITH_MESSAGE( + !candidate_moves.empty(), "No neighbours of v_" + << path.back() << " at correct distance " + << remaining_distance - 1 + << " to target vertex v_" << target_vertex); const auto& choice = rng.get_element(candidate_moves); path.push_back(choice.end_vertex); diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index e60ab2c213..e9d665dbed 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -36,12 +36,11 @@ void check_mapping( const VertexMapping& vertex_mapping, VertexMapping& work_mapping) { work_mapping.clear(); for (const auto& entry : vertex_mapping) { - TKET_ASSERT( - work_mapping.count(entry.second) == 0 || - AssertMessage() << "Vertices v_" << entry.first << " and v_" - << work_mapping[entry.second] - << " both have the same target vertex v_" - << entry.second); + TKET_ASSERT_WITH_MESSAGE( + work_mapping.count(entry.second) == 0, + "Vertices v_" << entry.first << " and v_" << work_mapping[entry.second] + << " both have the same target vertex v_" + << entry.second); work_mapping[entry.second] = entry.first; } diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index a0f3e350e9..8669e38d30 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -161,12 +161,11 @@ void VectorListHybridSkeleton::erase_interval( for (size_t nn = 1; nn < number_of_elements; ++nn) { last_element_index = m_links.at(last_element_index).next; - TKET_ASSERT( - last_element_index < m_links.size() || - AssertMessage() - << "VectorListHybridSkeleton::erase_interval with start index " + TKET_ASSERT_WITH_MESSAGE( + last_element_index < m_links.size(), + "erase_interval with start index " << index << ", number_of_elements=" << number_of_elements - << ", size " << m_links.size() << ", run out of elements at N=" + << ", size " << m_links.size() << ", runs out of elements at N=" << nn << " (got index " << last_element_index << ")"); } TKET_ASSERT(number_of_elements <= m_size); diff --git a/tket/src/Utils/AssertMessage.cpp b/tket/src/Utils/AssertMessage.cpp deleted file mode 100644 index 465740de1b..0000000000 --- a/tket/src/Utils/AssertMessage.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "AssertMessage.hpp" - -namespace tket { - -// GCOVR_EXCL_START -AssertMessage::AssertMessage() {} - -std::string AssertMessage::get_error_message() { - const auto message = get_error_stream().str(); - - // Clear the global stream, ready for the next message - // (currently this isn't necessary, because tket assert - // immediately aborts; but it may become necessary again in future, - // if we have assert variants with throws and multiple try/catch). - get_error_stream().str(std::string()); - return message; -} - -AssertMessage::operator bool() const { return false; } - -std::stringstream& AssertMessage::get_error_stream() { - static std::stringstream ss; - return ss; -} -// GCOVR_EXCL_STOP - -} // namespace tket diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index e937647ec5..c2c734093a 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -19,7 +19,6 @@ if (NOT ${COMP} STREQUAL "Utils") endif() add_library(tket-${COMP} - AssertMessage.cpp TketLog.cpp UnitID.cpp HelperFunctions.cpp diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index 45724fc939..d66f42b369 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -17,68 +17,64 @@ #include #include -#include "AssertMessage.hpp" #include "TketLog.hpp" /** - * If the condition `b` is not satisfied, log a diagnostic message and abort. - * But note that the message includes only the raw C++ source code for b, - * not the actual values of x,y in conditions like "xcritical(msg.str()); \ - std::abort(); \ - } \ - } catch (const std::exception& e2) { \ - std::stringstream msg; \ - msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ - << " : " << __func__ << " : " << __LINE__ \ - << ") threw unexpected exception: '" << e2.what() << "': aborting."; \ - tket::tket_log()->critical(msg.str()); \ - std::abort(); \ - } catch (...) { \ - std::stringstream msg; \ - msg << "Evaluating assertion condition '" << #b << "' (" << __FILE__ \ - << " : " << __func__ << " : " << __LINE__ \ - << ") threw unknown exception. Aborting."; \ - tket::tket_log()->critical(msg.str()); \ - std::abort(); \ - } \ +#define TKET_ASSERT_WITH_MESSAGE(condition, msg) \ + /* GCOVR_EXCL_START */ \ + do { \ + try { \ + if (!(condition)) { \ + std::stringstream ss; \ + ss << "Assertion '" << #condition << "' (" << __FILE__ << " : " \ + << __func__ << " : " << __LINE__ << ") failed. " << msg \ + << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ + } catch (const std::exception& ex) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") threw unexpected exception: '" << ex.what() << "'. " << msg \ + << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } catch (...) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") Threw unknown exception. 
" << msg << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ } while (0) /* GCOVR_EXCL_STOP */ + +#define TKET_ASSERT(condition) \ + do { \ + TKET_ASSERT_WITH_MESSAGE(condition, ""); \ + } while (0) diff --git a/tket/src/Utils/include/Utils/AssertMessage.hpp b/tket/src/Utils/include/Utils/AssertMessage.hpp deleted file mode 100644 index 38779fc838..0000000000 --- a/tket/src/Utils/include/Utils/AssertMessage.hpp +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include - -namespace tket { - -// GCOVR_EXCL_START -/** This is only for use with TKET_ASSERT, when we want to give a more detailed - * error message than just the assertion code and location. - * Also, some code might seem strange, but that's because exceptions - * can generate many extra branches in test coverage, see - * - * https://stackoverflow.com/questions/42003783/ - * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 - * - * Thus, we avoid exceptions. - */ -class AssertMessage { - public: - /** Construct the object, to begin writing to the stream. */ - AssertMessage(); - - /** Always returns false, so that "... || AssertMessage() << a)" - * becomes "... || false)". - */ - operator bool() const; - - /** Every streamable object x can be written to the stream. */ - template - const AssertMessage& operator<<(const T& x) const { - get_error_stream() << x; - return *this; - } - - /** Get the stored error message. Of course, if AssertMessage() - * has not actually been called, just returns an empty string. - * Also, clears the stored message, ready for the next time. - */ - static std::string get_error_message(); - - private: - /** Previously the error message for later use by TKET_ASSERT macros - * was passed on by exceptions within operator bool(), but that - * generated lots of code coverage branching problems. - * So now we use a global variable. The AssertMessage object - * will go out of scope, so there seems to be no other good way - * to pass the information on. 
- */ - static std::stringstream& get_error_stream(); -}; -// GCOVR_EXCL_STOP - -} // namespace tket From 52f221ce43535fb6a1a6c829f67e454742f95b52 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 8 Feb 2022 10:50:17 +0000 Subject: [PATCH 047/146] manually add coverage exclusion tags for now, until the branching problem is fixed --- tket/src/Graphs/AdjacencyData.cpp | 6 ++++++ tket/src/Mapping/LexiRoute.cpp | 12 +++++++----- tket/src/TokenSwapping/ArchitectureMapping.cpp | 10 ++++++++++ tket/src/TokenSwapping/CyclesGrowthManager.cpp | 2 ++ tket/src/TokenSwapping/DistancesFromArchitecture.cpp | 2 ++ .../src/TokenSwapping/NeighboursFromArchitecture.cpp | 5 ++++- tket/src/TokenSwapping/RiverFlowPathFinder.cpp | 4 ++++ tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp | 3 ++- .../TSAUtils/VertexMappingFunctions.cpp | 3 ++- .../TableLookup/CanonicalRelabelling.cpp | 3 ++- .../TokenSwapping/TableLookup/ExactMappingLookup.cpp | 8 +++++--- .../TableLookup/PartialMappingLookup.cpp | 7 +++++-- .../TableLookup/SwapListSegmentOptimiser.cpp | 6 ++++++ tket/src/TokenSwapping/TrivialTSA.cpp | 8 ++++++++ tket/src/TokenSwapping/VectorListHybridSkeleton.cpp | 2 ++ .../include/TokenSwapping/VectorListHybrid.hpp | 2 ++ tket/src/Utils/include/Utils/Assert.hpp | 8 +++++--- 17 files changed, 74 insertions(+), 17 deletions(-) diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index 56708058ce..85ab1110c4 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -65,11 +65,13 @@ string AdjacencyData::to_string() const { const set& AdjacencyData::get_neighbours( std::size_t vertex) const { + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( vertex < m_cleaned_data.size(), "AdjacencyData: get_neighbours called with invalid vertex " << vertex << "; there are only " << m_cleaned_data.size() << " vertices"); + // GCOVR_EXCL_STOP return m_cleaned_data[vertex]; } @@ -100,11 +102,13 @@ bool AdjacencyData::add_edge(std::size_t i, std::size_t j) { } bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( (i < m_cleaned_data.size() && j < m_cleaned_data.size()), "edge_exists called with vertices " << i << ", " << j << ", but there are only " << m_cleaned_data.size() << " vertices"); + // GCOVR_EXCL_STOP return m_cleaned_data[i].count(j) != 0; } @@ -142,6 +146,7 @@ AdjacencyData::AdjacencyData( for (std::size_t i = 0; i < m_cleaned_data.size(); ++i) { for (std::size_t j : raw_data[i]) { + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( i != j || allow_loops, "Vertex " << i << " out of " << m_cleaned_data.size() @@ -150,6 +155,7 @@ AdjacencyData::AdjacencyData( j < m_cleaned_data.size(), "Vertex " << i << " has illegal neighbour vertex " << j << ", the size is " << m_cleaned_data.size()); + // GCOVR_EXCL_STOP m_cleaned_data[i].insert(j); m_cleaned_data[j].insert(i); } diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index dfccd3b056..f660928cc9 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -327,11 +327,13 @@ std::pair LexiRoute::check_bridge( const std::pair LexiRoute::pair_distances( const Node& p0_first, const Node& p0_second, const Node& p1_first, const Node& p1_second) const { - TKET_ASSERT( - this->architecture_->node_exists(p0_first) && - this->architecture_->node_exists(p0_second) && - this->architecture_->node_exists(p1_first) && - this->architecture_->node_exists(p1_second)); + { + const bool valid = 
this->architecture_->node_exists(p0_first) && + this->architecture_->node_exists(p0_second) && + this->architecture_->node_exists(p1_first) && + this->architecture_->node_exists(p1_second); + TKET_ASSERT(valid); + } size_t curr_dist1 = this->architecture_->get_distance(p0_first, p0_second); size_t curr_dist2 = this->architecture_->get_distance(p1_first, p1_second); return (curr_dist1 > curr_dist2) ? std::make_pair(curr_dist1, curr_dist2) diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 8f0b9679b5..7f6e08dbfc 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -34,10 +34,12 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) const auto& node = m_vertex_to_node_mapping[ii]; { const auto citer = m_node_to_vertex_mapping.find(node); + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( citer == m_node_to_vertex_mapping.cend(), "Duplicate node " << node.repr() << " at vertices " << citer->second << ", " << ii); + // GCOVR_EXCL_STOP } m_node_to_vertex_mapping[node] = ii; } @@ -68,15 +70,18 @@ ArchitectureMapping::ArchitectureMapping( // Check that the nodes agree with the architecture object. const auto uids = arch.nodes(); + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( uids.size() == m_vertex_to_node_mapping.size(), "passed in " << edges.size() << " edges, giving " << m_vertex_to_node_mapping.size() << " vertices; but the architecture object has " << uids.size() << " vertices"); + // GCOVR_EXCL_STOP for (const UnitID& uid : uids) { const Node node(uid); + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( m_node_to_vertex_mapping.count(node) != 0, "passed in " @@ -84,6 +89,7 @@ ArchitectureMapping::ArchitectureMapping( << m_vertex_to_node_mapping.size() << " vertices; but the architecture object has an unknown node " << node.repr()); + // GCOVR_EXCL_STOP } } @@ -93,19 +99,23 @@ size_t ArchitectureMapping::number_of_vertices() const { const Node& ArchitectureMapping::get_node(size_t vertex) const { const auto num_vertices = number_of_vertices(); + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( vertex < num_vertices, "invalid vertex " << vertex << " (architecture only has " << num_vertices << " vertices)"); + // GCOVR_EXCL_STOP return m_vertex_to_node_mapping[vertex]; } size_t ArchitectureMapping::get_vertex(const Node& node) const { const auto citer = m_node_to_vertex_mapping.find(node); + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( citer != m_node_to_vertex_mapping.cend(), "node " << node.repr() << " has no vertex number"); + // GCOVR_EXCL_STOP return citer->second; } diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp index 6ed8db9695..894494e5d0 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.cpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -39,8 +39,10 @@ CyclesGrowthManager::Options& CyclesGrowthManager::get_options() { const Cycles& CyclesGrowthManager::get_cycles( bool throw_if_cycles_are_not_candidates) const { + // GCOVR_EXCL_START TKET_ASSERT( !(throw_if_cycles_are_not_candidates && !m_cycles_are_candidates)); + // GCOVR_EXCL_STOP return m_cycles; } diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index e66f04a191..e8c629c87c 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -74,6 +74,7 @@ size_t 
DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { // architectures, since get_distance now should throw if v1, v2 are in // different connected components. However, leave the check in, in case some // other bizarre error causes distance zero to be returned. + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( distance_entry > 0, "DistancesFromArchitecture: architecture has " @@ -82,6 +83,7 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { << vertex1 << "," << vertex2 << ")=0. " "Is the graph connected?"); + // GCOVR_EXCL_STOP } return distance_entry; } diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index f764da2073..d93cfc8b13 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -28,11 +28,12 @@ NeighboursFromArchitecture::NeighboursFromArchitecture( const std::vector& NeighboursFromArchitecture::operator()( size_t vertex) { const auto num_vertices = m_arch_mapping.number_of_vertices(); + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( vertex < num_vertices, "get_neighbours: invalid vertex " << vertex << " (only have " << num_vertices << " vertices)"); - + // GCOVR_EXCL_STOP auto& neighbours = m_cached_neighbours[vertex]; if (!neighbours.empty()) { // Already cached. @@ -50,6 +51,7 @@ const std::vector& NeighboursFromArchitecture::operator()( for (const Node& node : neighbour_nodes) { const auto neighbour_vertex = m_arch_mapping.get_vertex(node); + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( neighbour_vertex != vertex, "get_neighbours: vertex " @@ -57,6 +59,7 @@ const std::vector& NeighboursFromArchitecture::operator()( << neighbour_nodes.size() << " neighbours, and lists itself as a neighbour (loops not " "allowed)"); + // GCOVR_EXCL_STOP neighbours.push_back(neighbour_vertex); } std::sort(neighbours.begin(), neighbours.end()); diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index 4042dd8add..e80ab7516a 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -114,6 +114,7 @@ void RiverFlowPathFinder::Impl::grow_path( candidate_moves.back().count = edge_count; continue; } + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( neighbour_distance_to_target == remaining_distance || neighbour_distance_to_target == remaining_distance + 1, @@ -122,12 +123,15 @@ void RiverFlowPathFinder::Impl::grow_path( << " has neighbour v_" << neighbour << ", at distance " << neighbour_distance_to_target << " to the target v_" << target_vertex); + // GCOVR_EXCL_STOP } + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( !candidate_moves.empty(), "No neighbours of v_" << path.back() << " at correct distance " << remaining_distance - 1 << " to target vertex v_" << target_vertex); + // GCOVR_EXCL_STOP const auto& choice = rng.get_element(candidate_moves); path.push_back(choice.end_vertex); diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp index 3566cafd4c..dd75e1c98f 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -24,9 +24,10 @@ namespace tsa_internal { std::set get_random_set( RNG& rng, size_t sample_size, size_t population_size) { + // GCOVR_EXCL_START TKET_ASSERT( sample_size <= population_size || !"get_random_set: sample too large"); - + // 
GCOVR_EXCL_STOP std::set result; if (sample_size == 0 || population_size == 0) { return result; diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index e9d665dbed..8b812d53eb 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -36,12 +36,13 @@ void check_mapping( const VertexMapping& vertex_mapping, VertexMapping& work_mapping) { work_mapping.clear(); for (const auto& entry : vertex_mapping) { + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( work_mapping.count(entry.second) == 0, "Vertices v_" << entry.first << " and v_" << work_mapping[entry.second] << " both have the same target vertex v_" << entry.second); - + // GCOVR_EXCL_STOP work_mapping[entry.second] = entry.first; } } diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp index e26a60b976..1a58535764 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -109,10 +109,11 @@ const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( for (unsigned ii = 0; ii < m_result.new_to_old_vertices.size(); ++ii) { m_result.old_to_new_vertices[m_result.new_to_old_vertices[ii]] = ii; } + // GCOVR_EXCL_START TKET_ASSERT( m_result.new_to_old_vertices.size() == m_result.old_to_new_vertices.size()); - + // GCOVR_EXCL_STOP // And finally, the permutation hash. m_result.permutation_hash = 0; for (auto ii : m_sorted_cycles_indices) { diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp index d6c5f96c07..9b75744ae6 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -63,9 +63,11 @@ ExactMappingLookup::improve_upon_existing_result( return m_result; } TKET_ASSERT(relabelling.permutation_hash != 0); - TKET_ASSERT( - relabelling.new_to_old_vertices.size() == - relabelling.old_to_new_vertices.size()); + { + const bool size_match = relabelling.new_to_old_vertices.size() == + relabelling.old_to_new_vertices.size(); + TKET_ASSERT(size_match); + } TKET_ASSERT(relabelling.new_to_old_vertices.size() >= 2); fill_result_from_table(relabelling, edges, max_number_of_swaps); diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp index 891e80770a..cc7c6a5dcb 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -54,8 +54,11 @@ const ExactMappingLookup::Result& PartialMappingLookup::operator()( // For next_permutation, let's permute the empty SOURCE vertices. // They are already sorted, thus already at the first permutation // in the ordering, because they came from the keys of desired_mapping. 
- TKET_ASSERT(std::next_permutation( - m_empty_source_vertices.begin(), m_empty_source_vertices.end())); + { + const bool next_permutation = std::next_permutation( + m_empty_source_vertices.begin(), m_empty_source_vertices.end()); + TKET_ASSERT(next_permutation); + } m_altered_mapping = desired_mapping; for (unsigned perm_count = 0;;) { diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index 9fa6286e5a..3a36792882 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -79,8 +79,10 @@ SwapListSegmentOptimiser::optimise_segment( bool should_store = m_output.initial_segment_size == 0; if (!should_store) { // Something IS stored, but is our new solution better? + // GCOVR_EXCL_START TKET_ASSERT( m_output.initial_segment_size >= m_best_optimised_swaps.size()); + // GCOVR_EXCL_STOP const size_t current_decrease = m_output.initial_segment_size - m_best_optimised_swaps.size(); TKET_ASSERT(current_number_of_swaps >= lookup_result.swaps.size()); @@ -150,9 +152,11 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( initial_id, m_best_optimised_swaps.cbegin(), m_best_optimised_swaps.cend()); + // GCOVR_EXCL_START TKET_ASSERT( overwrite_result.number_of_overwritten_elements == m_best_optimised_swaps.size()); + // GCOVR_EXCL_STOP m_output.new_segment_last_id = overwrite_result.final_overwritten_element_id; @@ -166,9 +170,11 @@ void SwapListSegmentOptimiser::fill_final_output_and_swaplist( next_id_opt.value(), remaining_elements_to_erase); } } + // GCOVR_EXCL_START TKET_ASSERT( swap_list.size() + m_output.initial_segment_size == initial_size + m_output.final_segment_size); + // GCOVR_EXCL_STOP } } // namespace tsa_internal diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp index cbe621b432..7113ae236b 100644 --- a/tket/src/TokenSwapping/TrivialTSA.cpp +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -109,8 +109,10 @@ void TrivialTSA::do_final_checks() const { for (const auto& endpoints : m_cycle_endpoints) { for (auto id = endpoints.first;; id = m_abstract_cycles_vertices.next(id).value()) { + // GCOVR_EXCL_START TKET_ASSERT( m_vertices_seen.erase(m_abstract_cycles_vertices.at(id)) == 1); + // GCOVR_EXCL_STOP if (id == endpoints.second) { break; } @@ -142,8 +144,10 @@ void TrivialTSA::fill_disjoint_abstract_cycles( // Now, add the vertices to vertices seen... 
for (auto id = endpoints.first;; id = m_abstract_cycles_vertices.next(id).value()) { + // GCOVR_EXCL_START TKET_ASSERT( m_vertices_seen.insert(m_abstract_cycles_vertices.at(id)).second); + // GCOVR_EXCL_STOP if (id == endpoints.second) { break; } @@ -191,17 +195,21 @@ void TrivialTSA::append_partial_solution( continue; } const CyclicShiftCostEstimate estimate(m_vertices_work_vector, distances); + // GCOVR_EXCL_START TKET_ASSERT( estimate.estimated_concrete_swaps < std::numeric_limits::max()); TKET_ASSERT(estimate.start_v_index < m_vertices_work_vector.size()); + // GCOVR_EXCL_STOP if (estimate.estimated_concrete_swaps < best_estimated_concrete_swaps) { best_estimated_concrete_swaps = estimate.estimated_concrete_swaps; start_v_index = estimate.start_v_index; best_endpoints = endpoints; } } + // GCOVR_EXCL_START TKET_ASSERT( best_estimated_concrete_swaps < std::numeric_limits::max()); + // GCOVR_EXCL_STOP const auto swap_size_before = swaps.size(); const auto decrease = append_partial_solution_with_single_cycle( best_endpoints, start_v_index, swaps, vertex_mapping, distances, diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index 8669e38d30..b251f21244 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -161,12 +161,14 @@ void VectorListHybridSkeleton::erase_interval( for (size_t nn = 1; nn < number_of_elements; ++nn) { last_element_index = m_links.at(last_element_index).next; + // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( last_element_index < m_links.size(), "erase_interval with start index " << index << ", number_of_elements=" << number_of_elements << ", size " << m_links.size() << ", runs out of elements at N=" << nn << " (got index " << last_element_index << ")"); + // GCOVR_EXCL_STOP } TKET_ASSERT(number_of_elements <= m_size); m_size -= number_of_elements; diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp index 3b3f4a9bce..6595f46c6b 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -478,8 +478,10 @@ OverwriteIntervalResult VectorListHybrid::overwrite_interval( for (;;) { m_data.at(result.final_overwritten_element_id) = *citer; ++result.number_of_overwritten_elements; + // GCOVR_EXCL_START TKET_ASSERT( result.number_of_overwritten_elements <= max_number_of_elements); + // GCOVR_EXCL_STOP ++citer; if (citer == new_elements_cend) { return result; diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index d66f42b369..019740f8ec 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -33,7 +33,10 @@ * to a stringstream) will NOT begin if `condition` is true, * so there is no performance penalty. * - * The code should be ignored by test code coverage, even if multiline. + * Note: the intention was that the code would be ignored by test code + * coverage, even if multiline. However that didn't work, so we may + * just manually surround the worst multiline offenders until we come up + * with a better solution. * * This also checks if evaluating `condition` itself throws an exception. * @@ -45,7 +48,6 @@ * there are additional problems which need to be overcome somehow. 
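 *
 * A brief usage sketch, following the calls made elsewhere in this patch
 * (e.g. in NeighboursFromArchitecture.cpp); the names vertex and
 * num_vertices are just those of that example, not part of this header:
 *
 *    TKET_ASSERT_WITH_MESSAGE(
 *        vertex < num_vertices,
 *        "invalid vertex " << vertex << " (only have " << num_vertices
 *                          << " vertices)");
 *
 * The msg argument is streamed into the std::stringstream that builds the
 * abort diagnostic, so anything streamable may be passed; TKET_ASSERT is
 * simply this macro with an empty message.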
*/ #define TKET_ASSERT_WITH_MESSAGE(condition, msg) \ - /* GCOVR_EXCL_START */ \ do { \ try { \ if (!(condition)) { \ @@ -72,7 +74,7 @@ tket::tket_log()->critical(ss.str()); \ std::abort(); \ } \ - } while (0) /* GCOVR_EXCL_STOP */ + } while (0) #define TKET_ASSERT(condition) \ do { \ From 8e7d4be1f46787b8b2aeaf63416272880b173001 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 8 Feb 2022 13:14:17 +0000 Subject: [PATCH 048/146] rename method, remove predicate from routing --- pytket/binders/mapping.cpp | 21 +++++++++++++++++++++ tket/src/Predicates/PassGenerators.cpp | 4 ++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index f50eb622e4..be32afbb07 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -1,15 +1,36 @@ #include +#include #include #include +#include +#include "Circuit/Circuit.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/RoutingMethodCircuit.hpp" +#include "TokenSwapping/main_entry_functions.hpp" +#include "binder_utils.hpp" namespace py = pybind11; namespace tket { + +std::vector> get_ts_swaps( + const Architecture& architecture, const NodeMapping& node_mapping) { + return get_swaps(architecture, node_mapping); +} + PYBIND11_MODULE(mapping, m) { + m.def( + "get_token_swapping_network", &get_ts_swaps, + "For a given architecture and map from Node to Node, returns a list of " + "tuple of Node corresponding to a sequence of SWAP gates that would map" + "a state from the first node to second node. \n\n:param architecture: " + "Architecture SWAP network respects. \n:param node_mapping: Node from " + "and to " + "some logical state must travel.", + py::arg("architecture"), py::arg("node_mapping")); + py::class_>( m, "RoutingMethod", "Parent class for RoutingMethod, for inheritance purposes only, not for " diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index cec92667c2..8a2b64ca56 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -214,11 +214,11 @@ PassPtr gen_routing_pass( Transform t = Transform(trans); PredicatePtr twoqbpred = std::make_shared(); - PredicatePtr placedpred = std::make_shared(arc); + // PredicatePtr placedpred = std::make_shared(arc); PredicatePtr n_qubit_pred = std::make_shared(arc.n_nodes()); PredicatePtrMap precons{ - CompilationUnit::make_type_pair(placedpred), + // CompilationUnit::make_type_pair(placedpred), CompilationUnit::make_type_pair(twoqbpred), CompilationUnit::make_type_pair(n_qubit_pred)}; From dc761e0895b7f5369c2605eb915b3972a6f72acc Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 8 Feb 2022 13:56:48 +0000 Subject: [PATCH 049/146] Add copyright information --- pytket/binders/mapping.cpp | 14 +++++++++++++ pytket/tests/mapping_test.py | 16 +++++++++----- tket/src/Mapping/LexiRoute.cpp | 14 +++++++++++++ .../src/Mapping/LexicographicalComparison.cpp | 14 +++++++++++++ tket/src/Mapping/MappingFrontier.cpp | 14 +++++++++++++ tket/src/Mapping/MappingManager.cpp | 14 +++++++++++++ tket/src/Mapping/MultiGateReorder.cpp | 14 +++++++++++++ tket/src/Mapping/RoutingMethodCircuit.cpp | 14 +++++++++++++ tket/src/Mapping/RoutingMethodJson.cpp | 14 +++++++++++++ .../src/Mapping/include/Mapping/LexiRoute.hpp | 19 +++++++++++++---- .../Mapping/LexicographicalComparison.hpp | 19 +++++++++++++---- .../include/Mapping/MappingFrontier.hpp | 21 ++++++++++++++----- .../include/Mapping/MappingManager.hpp | 21 ++++++++++++++----- 
.../include/Mapping/MultiGateReorder.hpp | 19 +++++++++++++---- .../Mapping/include/Mapping/RoutingMethod.hpp | 21 ++++++++++++++----- .../include/Mapping/RoutingMethodCircuit.hpp | 19 +++++++++++++---- .../include/Mapping/RoutingMethodJson.hpp | 19 +++++++++++++---- 17 files changed, 246 insertions(+), 40 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index f50eb622e4..a0fd34ec01 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index 9f989b4279..5c65f37616 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -1,10 +1,16 @@ -# Copyright 2019-2021 Cambridge Quantum Computing +# Copyright 2019-2022 Cambridge Quantum Computing # -# You may not use this file except in compliance with the Licence. -# You may obtain a copy of the Licence in the LICENCE file accompanying -# these documents or at: +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# https://cqcl.github.io/pytket/build/html/licence.html +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from pytket.mapping import MappingManager, RoutingMethodCircuit, LexiRouteRoutingMethod # type: ignore from pytket.architecture import Architecture # type: ignore diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index f660928cc9..ed3b8480cd 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingFrontier.hpp" diff --git a/tket/src/Mapping/LexicographicalComparison.cpp b/tket/src/Mapping/LexicographicalComparison.cpp index a5e5ea69b4..a221baa09f 100644 --- a/tket/src/Mapping/LexicographicalComparison.cpp +++ b/tket/src/Mapping/LexicographicalComparison.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "Mapping/LexicographicalComparison.hpp" namespace tket { diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index b38aefd7ac..e22b657c3a 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "Mapping/MappingFrontier.hpp" #include "Circuit/Circuit.hpp" diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 3250345a1c..e2c671c21d 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "Mapping/MappingManager.hpp" #include "OpType/OpTypeFunctions.hpp" diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index e127489d7a..c05ea06122 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "Mapping/MultiGateReorder.hpp" #include "Mapping/MappingFrontier.hpp" diff --git a/tket/src/Mapping/RoutingMethodCircuit.cpp b/tket/src/Mapping/RoutingMethodCircuit.cpp index 32708452c9..474253a9d7 100644 --- a/tket/src/Mapping/RoutingMethodCircuit.cpp +++ b/tket/src/Mapping/RoutingMethodCircuit.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "RoutingMethodCircuit.hpp" namespace tket { diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index bb8c7c543e..1f9479c89f 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "Mapping/RoutingMethodJson.hpp" namespace tket { diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp index 49aef0a62f..cbd2d7b4e2 100644 --- a/tket/src/Mapping/include/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_LexiRoute_H_ -#define _TKET_LexiRoute_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "Mapping/LexicographicalComparison.hpp" #include "Mapping/MappingFrontier.hpp" @@ -190,5 +203,3 @@ class LexiRouteRoutingMethod : public RoutingMethod { JSON_DECL(LexiRouteRoutingMethod); } // namespace tket - -#endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp b/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp index 0f8ccd0461..8911340ab2 100644 --- a/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp +++ b/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_LexicographicalComparison_H_ -#define _TKET_LexicographicalComparison_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Architecture/Architecture.hpp" #include "Utils/BiMapHeaders.hpp" @@ -83,5 +96,3 @@ class LexicographicalComparison { }; } // namespace tket - -#endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp index 02c404869b..4c8772e644 100644 --- a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_MappingFrontier_H_ -#define _TKET_MappingFrontier_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Architecture/Architecture.hpp" #include "Circuit/Circuit.hpp" @@ -166,6 +179,4 @@ struct MappingFrontier { void set_quantum_boundary(const unit_vertport_frontier_t& new_boundary); }; -} // namespace tket - -#endif \ No newline at end of file +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MappingManager.hpp b/tket/src/Mapping/include/Mapping/MappingManager.hpp index 1c1ba8a66f..09944a56b8 100644 --- a/tket/src/Mapping/include/Mapping/MappingManager.hpp +++ b/tket/src/Mapping/include/Mapping/MappingManager.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_MappingManager_H_ -#define _TKET_MappingManager_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Architecture/Architecture.hpp" #include "Circuit/Circuit.hpp" @@ -44,6 +57,4 @@ class MappingManager { private: ArchitecturePtr architecture_; }; -} // namespace tket - -#endif \ No newline at end of file +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp index 9a4e4604be..cb7a51c300 100644 --- a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_MultiGateReorder_H_ -#define _TKET_MultiGateReorder_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Mapping/MappingFrontier.hpp" #include "Mapping/RoutingMethod.hpp" @@ -80,5 +93,3 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { }; } // namespace tket - -#endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index b2a1136072..7dc4d7d344 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_RoutingMethod_H_ -#define _TKET_RoutingMethod_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "Mapping/MappingFrontier.hpp" #include "Utils/Json.hpp" @@ -57,6 +70,4 @@ class RoutingMethod { typedef std::shared_ptr RoutingMethodPtr; -} // namespace tket - -#endif \ No newline at end of file +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp index a500bf1b54..63a5ca7b15 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_RoutingMethodCircuit_H_ -#define _TKET_RoutingMethodCircuit_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Mapping/RoutingMethod.hpp" @@ -57,5 +70,3 @@ class RoutingMethodCircuit : public RoutingMethod { JSON_DECL(RoutingMethod); } // namespace tket - -#endif diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp index 35ffa61fb9..9cbdb22e90 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_RoutingMethodJson_H_ -#define _TKET_RoutingMethodJson_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once #include "Mapping/LexiRoute.hpp" #include "Mapping/MultiGateReorder.hpp" @@ -21,5 +34,3 @@ void from_json(const nlohmann::json& j, std::vector& rmp_v); JSON_DECL(std::vector); } // namespace tket - -#endif From d805b0bd8b63a16214d1d41bdd7926eaff95354e Mon Sep 17 00:00:00 2001 From: cqc-melf <70640934+cqc-melf@users.noreply.github.com> Date: Tue, 8 Feb 2022 15:16:29 +0100 Subject: [PATCH 050/146] [RV3] [refactor] Clean up of the dependencies of the modules (#199) * remove cycle from cmake lists * clean up * add comment * fix binder include * try to fix pytket build * try to fix problems * fix windows build * try to fix windows --- pytket/CMakeLists.txt | 10 ++++++---- tket/src/ArchAwareSynth/CMakeLists.txt | 5 ++--- tket/src/CMakeLists.txt | 8 +++++--- tket/src/Characterisation/CMakeLists.txt | 1 - tket/src/Converters/CMakeLists.txt | 3 +-- tket/src/Diagonalisation/CMakeLists.txt | 3 +-- tket/src/PauliGraph/CMakeLists.txt | 1 - tket/src/Placement/CMakeLists.txt | 3 +-- 8 files changed, 16 insertions(+), 18 deletions(-) diff --git a/pytket/CMakeLists.txt b/pytket/CMakeLists.txt index b7326df9cb..c491bd77b6 100644 --- a/pytket/CMakeLists.txt +++ b/pytket/CMakeLists.txt @@ -161,8 +161,9 @@ pybind11_add_module(architecture binders/architecture.cpp) target_include_directories(architecture PRIVATE binders/include) target_link_libraries(architecture PRIVATE tket-Architecture - tket-Graphs) -target_link_libraries(architecture PRIVATE ${CONAN_LIBS_SYMENGINE}) + tket-Graphs + tket-Utils) +target_link_libraries(architecture PRIVATE ${TKET_EXTRA_LIBS}) pybind11_add_module(placement binders/placement.cpp) @@ -170,8 +171,9 @@ target_include_directories(placement PRIVATE binders/include) target_link_libraries(placement PRIVATE tket-Placement tket-Architecture - tket-Circuit) -target_link_libraries(placement PRIVATE ${CONAN_LIBS_SYMENGINE}) + tket-Circuit + tket-Utils) +target_link_libraries(placement PRIVATE ${TKET_EXTRA_LIBS}) pybind11_add_module(program binders/program.cpp) target_include_directories(program PRIVATE binders/include) diff --git a/tket/src/ArchAwareSynth/CMakeLists.txt b/tket/src/ArchAwareSynth/CMakeLists.txt index 0ae6801093..95a85536fe 100644 --- a/tket/src/ArchAwareSynth/CMakeLists.txt +++ b/tket/src/ArchAwareSynth/CMakeLists.txt @@ -31,11 +31,10 @@ list(APPEND DEPS_${COMP} Converters Gate Graphs - Ops - OpType - Mapping PauliGraph Placement + Ops + OpType TokenSwapping Utils) diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index 083f42acae..dc58334a34 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -57,6 +57,8 @@ ELSEIF(APPLE) set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON) ENDIF() +# if you add new modules here make sure that it is added at the right position +# this list corresponds to a topological sorting of the dependency graph of the different modules list(APPEND TKET_COMPS Utils ZX @@ -74,11 +76,11 @@ list(APPEND TKET_COMPS Characterisation Converters TokenSwapping - Mapping Placement - MeasurementSetup - Transformations ArchAwareSynth + Mapping + MeasurementSetup + Transformations Predicates) foreach(COMP ${TKET_COMPS}) diff --git a/tket/src/Characterisation/CMakeLists.txt b/tket/src/Characterisation/CMakeLists.txt index a049c278fa..79a8ea1280 100644 --- a/tket/src/Characterisation/CMakeLists.txt +++ b/tket/src/Characterisation/CMakeLists.txt @@ -28,7 +28,6 @@ list(APPEND DEPS_${COMP} Circuit Gate Graphs - Mapping Ops OpType PauliGraph diff --git a/tket/src/Converters/CMakeLists.txt b/tket/src/Converters/CMakeLists.txt index 
59a89b41ab..5892649513 100644 --- a/tket/src/Converters/CMakeLists.txt +++ b/tket/src/Converters/CMakeLists.txt @@ -31,8 +31,7 @@ list(APPEND DEPS_${COMP} Circuit Clifford Diagonalisation - Gate - Mapping + Gate Ops OpType PauliGraph diff --git a/tket/src/Diagonalisation/CMakeLists.txt b/tket/src/Diagonalisation/CMakeLists.txt index 224ee6c80b..ce77270497 100644 --- a/tket/src/Diagonalisation/CMakeLists.txt +++ b/tket/src/Diagonalisation/CMakeLists.txt @@ -27,8 +27,7 @@ list(APPEND DEPS_${COMP} Circuit Clifford Gate - Graphs - Mapping + Graphs Ops OpType PauliGraph diff --git a/tket/src/PauliGraph/CMakeLists.txt b/tket/src/PauliGraph/CMakeLists.txt index f3175eb79c..af9054fdc6 100644 --- a/tket/src/PauliGraph/CMakeLists.txt +++ b/tket/src/PauliGraph/CMakeLists.txt @@ -25,7 +25,6 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Clifford Gate - Mapping Ops OpType TokenSwapping diff --git a/tket/src/Placement/CMakeLists.txt b/tket/src/Placement/CMakeLists.txt index d949f51e78..29666d4e89 100644 --- a/tket/src/Placement/CMakeLists.txt +++ b/tket/src/Placement/CMakeLists.txt @@ -29,8 +29,7 @@ list(APPEND DEPS_${COMP} Characterisation Circuit Gate - Graphs - Mapping + Graphs Ops OpType TokenSwapping From e8f23dce87245bbc20ee67c4c685bb4bf0c22172 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 9 Feb 2022 10:16:48 +0000 Subject: [PATCH 051/146] add bimaps attribute to MappingFrontier --- tket/src/Mapping/MappingFrontier.cpp | 41 ++++++++++++++++++- tket/src/Mapping/MappingManager.cpp | 4 +- .../include/Mapping/MappingFrontier.hpp | 4 ++ 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index e22b657c3a..a36ccf44a3 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -52,6 +52,36 @@ std::shared_ptr frontier_convert_vertport_to_edge( MappingFrontier::MappingFrontier(Circuit& _circuit) : circuit_(_circuit) { this->quantum_boundary = std::make_shared(); this->classical_boundary = std::make_shared(); + this->bimaps_ = std::make_shared(); + // Set up {UnitID, VertPort} objects for quantum and classical boundaries + for (const Qubit& qb : this->circuit_.all_qubits()) { + this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); + this->bimaps_->initial.insert({qb, qb}); + this->bimaps_->final.insert({qb, qb}); + } + for (const Bit& bit : this->circuit_.all_bits()) { + this->classical_boundary->insert( + {bit, + this->circuit_.get_nth_b_out_bundle(this->circuit_.get_in(bit), 0)}); + } +} + +/** + * Initialise quantum_boundary and classical_boundary from + * out edges of Input vertices + */ +MappingFrontier::MappingFrontier(Circuit& _circuit, std::shared_ptr _bimaps) : circuit_(_circuit), bimaps_(_bimaps) { + this->quantum_boundary = std::make_shared(); + this->classical_boundary = std::make_shared(); + // this->bimaps_ = std::make_shared(); + + // for(const std::pair& pair : mapping_frontier.bimaps_->initial){ + // this.bimaps_->initial.insert({pair.first, pair.second}); + // } + // for(const std::pair& pair : mapping_frontier.bimaps_->final){ + // this.bimaps_->final.insert({pair.first, pair.second}); + // } + // Set up {UnitID, VertPort} objects for quantum and classical boundaries for (const Qubit& qb : this->circuit_.all_qubits()) { this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); @@ -64,9 +94,17 @@ MappingFrontier::MappingFrontier(Circuit& _circuit) : circuit_(_circuit) { } MappingFrontier::MappingFrontier(const MappingFrontier& 
mapping_frontier) - : circuit_(mapping_frontier.circuit_) { + : circuit_(mapping_frontier.circuit_), bimaps_(mapping_frontier.bimaps_) { this->quantum_boundary = std::make_shared(); this->classical_boundary = std::make_shared(); + // this->bimaps_ = std::make_shared(); + + // for(const std::pair& pair : mapping_frontier.bimaps_->initial){ + // this.bimaps_->initial.insert({pair.first, pair.second}); + // } + // for(const std::pair& pair : mapping_frontier.bimaps_->final){ + // this.bimaps_->final.insert({pair.first, pair.second}); + // } for (const std::pair& pair : mapping_frontier.quantum_boundary->get()) { this->quantum_boundary->insert({pair.first, pair.second}); @@ -450,6 +488,7 @@ void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { this->circuit_.boundary.get().insert({uid_1, uid1_in, uid0_out}); std::map final_map = {{n0, n1}, {n1, n0}}; + this->circuit_.update_final_map(final_map); } diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index e2c671c21d..b7e3603653 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -24,11 +24,11 @@ MappingManager::MappingManager(const ArchitecturePtr& _architecture) bool MappingManager::route_circuit( Circuit& circuit, - const std::vector& routing_methods) const { + const std::vector& routing_methods, + std::shared_ptr maps) const { // Assumption; Routing can not route a circuit // with more logical qubits than an Architecture has // physical qubits physically permitted - if (circuit.n_qubits() > this->architecture_->n_nodes()) { std::string error_string = "Circuit has" + std::to_string(circuit.n_qubits()) + diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp index 4c8772e644..4d209e8f83 100644 --- a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -64,8 +64,12 @@ struct MappingFrontier { std::set ancilla_nodes_; + std::shared_ptr bimaps_; + MappingFrontier(Circuit& _circuit); + MappingFrontier(Circuit& _circuit, std::shared_ptr _bimaps); + // copy constructor MappingFrontier(const MappingFrontier& mapping_frontier); From 4a210cc7c0bb1a890a3d2a9fda0f297a6434a5b2 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 9 Feb 2022 13:51:42 +0000 Subject: [PATCH 052/146] update_quantum_boundary_uids remaps bimaps entries --- tket/src/Mapping/MappingFrontier.cpp | 99 ++++++++++++++----- tket/src/Mapping/MappingManager.cpp | 16 ++- .../include/Mapping/MappingFrontier.hpp | 6 ++ .../include/Mapping/MappingManager.hpp | 19 ++++ tket/src/Predicates/CompilerPass.cpp | 1 - tket/src/Predicates/PassGenerators.cpp | 13 +-- 6 files changed, 114 insertions(+), 40 deletions(-) diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index a36ccf44a3..86fd62bc8c 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -70,17 +70,11 @@ MappingFrontier::MappingFrontier(Circuit& _circuit) : circuit_(_circuit) { * Initialise quantum_boundary and classical_boundary from * out edges of Input vertices */ -MappingFrontier::MappingFrontier(Circuit& _circuit, std::shared_ptr _bimaps) : circuit_(_circuit), bimaps_(_bimaps) { +MappingFrontier::MappingFrontier( + Circuit& _circuit, std::shared_ptr _bimaps) + : circuit_(_circuit), bimaps_(_bimaps) { this->quantum_boundary = std::make_shared(); this->classical_boundary = std::make_shared(); - // this->bimaps_ = std::make_shared(); - - 
// for(const std::pair& pair : mapping_frontier.bimaps_->initial){ - // this.bimaps_->initial.insert({pair.first, pair.second}); - // } - // for(const std::pair& pair : mapping_frontier.bimaps_->final){ - // this.bimaps_->final.insert({pair.first, pair.second}); - // } // Set up {UnitID, VertPort} objects for quantum and classical boundaries for (const Qubit& qb : this->circuit_.all_qubits()) { @@ -97,14 +91,7 @@ MappingFrontier::MappingFrontier(const MappingFrontier& mapping_frontier) : circuit_(mapping_frontier.circuit_), bimaps_(mapping_frontier.bimaps_) { this->quantum_boundary = std::make_shared(); this->classical_boundary = std::make_shared(); - // this->bimaps_ = std::make_shared(); - - // for(const std::pair& pair : mapping_frontier.bimaps_->initial){ - // this.bimaps_->initial.insert({pair.first, pair.second}); - // } - // for(const std::pair& pair : mapping_frontier.bimaps_->final){ - // this.bimaps_->final.insert({pair.first, pair.second}); - // } + for (const std::pair& pair : mapping_frontier.quantum_boundary->get()) { this->quantum_boundary->insert({pair.first, pair.second}); @@ -342,6 +329,22 @@ void MappingFrontier::update_quantum_boundary_uids( unit_map_t relabel = {label}; this->circuit_.rename_units(relabel); } + + // update initial map + auto it = this->bimaps_->initial.right.find(label.first); + if (it != this->bimaps_->initial.right.end()) { + UnitID simple_q_init = it->second; + this->bimaps_->initial.left.erase(simple_q_init); + this->bimaps_->initial.left.insert({simple_q_init, label.second}); + } + + // update final map + it = this->bimaps_->final.right.find(label.first); + if (it != this->bimaps_->final.right.end()) { + UnitID simple_q_final = it->second; + this->bimaps_->final.left.erase(simple_q_final); + this->bimaps_->final.left.insert({simple_q_final, label.second}); + } } } } @@ -488,8 +491,8 @@ void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { this->circuit_.boundary.get().insert({uid_1, uid1_in, uid0_out}); std::map final_map = {{n0, n1}, {n1, n0}}; - - this->circuit_.update_final_map(final_map); + + this->update_final_map(final_map); } void MappingFrontier::add_bridge( @@ -533,13 +536,17 @@ void MappingFrontier::add_ancilla(const UnitID& ancilla) { Qubit qb(ancilla); this->circuit_.add_qubit(qb); this->quantum_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); + + this->bimaps_->initial.insert({qb, qb}); + this->bimaps_->final.insert({qb, qb}); this->ancilla_nodes_.insert(Node(ancilla)); UnitID uid_ancilla(ancilla); unit_map_t update_map; update_map.insert({uid_ancilla, uid_ancilla}); - this->circuit_.update_initial_map(update_map); - this->circuit_.update_final_map(update_map); + + this->update_initial_map(update_map); + this->update_final_map(update_map); } void MappingFrontier::merge_ancilla( @@ -588,11 +595,53 @@ void MappingFrontier::merge_ancilla( // Can now just erase "merge" qubit from the circuit this->circuit_.boundary.get().erase(merge); - if (this->circuit_.unit_bimaps_.initial) { - this->circuit_.unit_bimaps_.initial->right.erase(merge); + this->bimaps_->initial.right.erase(merge); + this->bimaps_->final.left.erase(merge); +} + +template +void MappingFrontier::update_initial_map(const std::map& qm) { + // Can only work for Unit classes + static_assert(std::is_base_of::value); + static_assert(std::is_base_of::value); + // Unit types must be related, so cannot rename e.g. 
Bits to Qubits + static_assert( + std::is_base_of::value || + std::is_base_of::value); + unit_map_t new_initial_map; + for (const std::pair& pair : qm) { + const auto& it = this->bimaps_->initial.right.find(pair.first); + if (it == this->bimaps_->initial.right.end()) { + continue; + } + new_initial_map.insert({it->second, pair.second}); + this->bimaps_->initial.right.erase(pair.first); + } + for (const std::pair& pair : new_initial_map) { + this->bimaps_->initial.left.insert(pair); + } +} + +template +void MappingFrontier::update_final_map(const std::map& qm) { + // Can only work for Unit classes + static_assert(std::is_base_of::value); + static_assert(std::is_base_of::value); + // Unit types must be related, so cannot rename e.g. Bits to Qubits + static_assert( + std::is_base_of::value || + std::is_base_of::value); + unit_map_t new_final_map; + for (const std::pair& pair : qm) { + const auto& it = this->bimaps_->final.right.find(pair.first); + if (it == this->bimaps_->final.right.end()) { + continue; + } + new_final_map.insert({it->second, pair.second}); + this->bimaps_->final.right.erase(pair.first); } - if (this->circuit_.unit_bimaps_.final) { - this->circuit_.unit_bimaps_.final->right.erase(merge); + for (const std::pair& pair : new_final_map) { + this->bimaps_->final.left.insert(pair); } } diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index b7e3603653..3df1a52f17 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -24,7 +24,13 @@ MappingManager::MappingManager(const ArchitecturePtr& _architecture) bool MappingManager::route_circuit( Circuit& circuit, - const std::vector& routing_methods, + const std::vector& routing_methods) const { + return this->route_circuit_with_maps( + circuit, routing_methods, std::make_shared()); +} + +bool MappingManager::route_circuit_with_maps( + Circuit& circuit, const std::vector& routing_methods, std::shared_ptr maps) const { // Assumption; Routing can not route a circuit // with more logical qubits than an Architecture has @@ -41,8 +47,12 @@ bool MappingManager::route_circuit( // mapping_frontier tracks boundary between routed & un-routed in circuit // when initialised, boundary is over output edges of input vertices - std::shared_ptr mapping_frontier = - std::make_shared(circuit); + std::shared_ptr mapping_frontier; + if (maps) { + mapping_frontier = std::make_shared(circuit, maps); + } else { + mapping_frontier = std::make_shared(circuit); + } // updates routed/un-routed boundary mapping_frontier->advance_frontier_boundary(this->architecture_); diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp index 4d209e8f83..0554ae7c6d 100644 --- a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -181,6 +181,12 @@ struct MappingFrontier { * @param new_boundary Object to reassign with. 
*/ void set_quantum_boundary(const unit_vertport_frontier_t& new_boundary); + + template + void update_initial_map(const std::map& qm); + + template + void update_final_map(const std::map& qm); }; } // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MappingManager.hpp b/tket/src/Mapping/include/Mapping/MappingManager.hpp index 09944a56b8..09f342ee4a 100644 --- a/tket/src/Mapping/include/Mapping/MappingManager.hpp +++ b/tket/src/Mapping/include/Mapping/MappingManager.hpp @@ -54,6 +54,25 @@ class MappingManager { Circuit& circuit, const std::vector& routing_methods) const; + /** + * route_circuit_maps + * Referenced Circuit modified such that all multi-qubit gates are permitted + * by this->architecture_ RoutingIncompability thrown if Circuit has more + * logical qubits than Architecture has physical qubits RoutingIncompability + * thrown if Circuit has a gate of OpType not in Architecture's permitted + * OpTypes + * + * @param circuit Circuit to be routed + * @param routing_methods Ranked RoutingMethod objects to use for routing + * segments. + * @param maps For tracking placed and permuted qubits during Compilation + * + * @return True if circuit is modified + */ + bool route_circuit_with_maps( + Circuit& circuit, const std::vector& routing_methods, + std::shared_ptr maps) const; + private: ArchitecturePtr architecture_; }; diff --git a/tket/src/Predicates/CompilerPass.cpp b/tket/src/Predicates/CompilerPass.cpp index 8838c93509..9a72cfe774 100644 --- a/tket/src/Predicates/CompilerPass.cpp +++ b/tket/src/Predicates/CompilerPass.cpp @@ -14,7 +14,6 @@ #include "CompilerPass.hpp" - #include #include "Mapping/RoutingMethodJson.hpp" diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index 44e115a319..d54f752be8 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -215,19 +215,10 @@ PassPtr gen_cx_mapping_pass( PassPtr gen_routing_pass( const Architecture& arc, const std::vector& config) { - Transform::Transformation trans = [=](Circuit& circ) { - MappingManager mm(std::make_shared(arc)); - return mm.route_circuit(circ, config); - - /* -PassPtr gen_routing_pass(const Architecture& arc, const RoutingConfig& config) { Transform::Transformation trans = [=](Circuit& circ, std::shared_ptr maps) { - Routing route(circ, arc); - std::pair circbool = route.solve(config, maps); - circ = circbool.first; - return circbool.second; - */ + MappingManager mm(std::make_shared(arc)); + return mm.route_circuit_with_maps(circ, config, maps); }; Transform t = Transform(trans); From 3d997b45b397218ecab224ffa8f7050338344ffb Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Thu, 10 Feb 2022 13:13:36 +0000 Subject: [PATCH 053/146] [1777] Split `LexiRouteRoutingMethod` into two methods (#209) * LexiLabellingRoutingMethod class * Add labelling method using lexiroute methods * First LexiLabelling implementation, with testing * update defaults * confirm correct imports and routing method config * Expose LabellingRoutingMethod to pytket * Update json serilaization * reformat utils test * Address PR Comments --- pytket/binders/mapping.cpp | 14 +- pytket/binders/passes.cpp | 12 +- pytket/tests/backend_test.py | 4 +- pytket/tests/mapping_test.py | 6 +- pytket/tests/mitigation_test.py | 6 +- pytket/tests/predicates_test.py | 10 +- tket/src/Mapping/CMakeLists.txt | 1 + tket/src/Mapping/LexiLabelling.cpp | 78 +++++ tket/src/Mapping/LexiRoute.cpp | 100 +++--- 
tket/src/Mapping/RoutingMethodJson.cpp | 6 + .../Mapping/include/Mapping/LexiLabelling.hpp | 52 ++++ .../src/Mapping/include/Mapping/LexiRoute.hpp | 13 +- tket/src/Predicates/PassGenerators.cpp | 10 +- tket/tests/test_CompilerPass.cpp | 35 ++- tket/tests/test_LexiRoute.cpp | 287 ++++++++++++++++-- tket/tests/test_json.cpp | 9 +- 16 files changed, 541 insertions(+), 102 deletions(-) create mode 100644 tket/src/Mapping/LexiLabelling.cpp create mode 100644 tket/src/Mapping/include/Mapping/LexiLabelling.hpp diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index a0fd34ec01..83b829af40 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -16,6 +16,7 @@ #include #include +#include "Mapping/LexiLabelling.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/RoutingMethodCircuit.hpp" @@ -64,15 +65,24 @@ PYBIND11_MODULE(mapping, m) { RoutingMethod>( m, "LexiRouteRoutingMethod", "Defines a RoutingMethod object for mapping circuits that uses the " - "Lexicographical Comparison approach outlined in arXiv:1902.08091.") + "Lexicographical Comparison approach outlined in arXiv:1902.08091." + "Only supports 1-qubit, 2-qubit and barrier gates.") .def( py::init(), - "LexiRoute constructor.\n\n:param lookahead: Maximum depth of " + "LexiRouteRoutingMethod constructor.\n\n:param lookahead: Maximum " + "depth of " "lookahead " "employed when picking SWAP for purpose of logical to physical " "mapping.", py::arg("lookahead") = 10); + py::class_< + LexiLabellingMethod, std::shared_ptr, RoutingMethod>( + m, "LexiLabellingMethod", + "Defines a RoutingMethod for labelling Qubits that uses the " + "Lexicographical Comparison approach outlined in arXiv:1902.08091.") + .def(py::init<>(), "LexiLabellingMethod constructor."); + py::class_( m, "MappingManager", "Defined by a pytket Architecture object, maps Circuit logical Qubits " diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index c384d9135e..c016db86d6 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -15,6 +15,8 @@ #include #include "ArchAwareSynth/SteinerForest.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" #include "Mapping/RoutingMethod.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" @@ -33,8 +35,9 @@ namespace tket { static PassPtr gen_cx_mapping_pass_kwargs( const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { - RoutingMethodPtr method = std::make_shared(100); - std::vector config = {method}; + std::vector config = { + std::make_shared(), + std::make_shared(100)}; if (kwargs.contains("config")) { config = py::cast>(kwargs["config"]); } @@ -50,8 +53,9 @@ static PassPtr gen_cx_mapping_pass_kwargs( } static PassPtr gen_default_routing_pass(const Architecture &arc) { - RoutingMethodPtr method = std::make_shared(100); - std::vector config = {method}; + std::vector config = { + std::make_shared(), + std::make_shared(100)}; return gen_routing_pass(arc, config); } diff --git a/pytket/tests/backend_test.py b/pytket/tests/backend_test.py index f40f361b30..2e6819fd47 100644 --- a/pytket/tests/backend_test.py +++ b/pytket/tests/backend_test.py @@ -24,7 +24,7 @@ from pytket.circuit import Circuit, OpType, BasisOrder, Qubit, Bit, Node # type: ignore from pytket.predicates import CompilationUnit # type: ignore from pytket.passes import PauliSimp, CliffordSimp, ContextSimp # type: ignore -from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore 
+from pytket.mapping import MappingManager, LexiRouteRoutingMethod, LexiLabellingMethod # type: ignore from pytket.architecture import Architecture # type: ignore from pytket.utils.outcomearray import OutcomeArray, readout_counts from pytket.utils.prepare import prepare_circuit @@ -528,7 +528,7 @@ def test_postprocess_3() -> None: mm = MappingManager(arc) rc = c.copy() - mm.route_circuit(rc, [LexiRouteRoutingMethod()]) + mm.route_circuit(rc, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) n_shots = 100 h = b.process_circuit(b.get_compiled_circuit(c), n_shots=n_shots, postprocess=True) r = b.get_result(h) diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index 5c65f37616..689fb3d1c6 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pytket.mapping import MappingManager, RoutingMethodCircuit, LexiRouteRoutingMethod # type: ignore +from pytket.mapping import MappingManager, RoutingMethodCircuit, LexiRouteRoutingMethod, LexiLabellingMethod # type: ignore from pytket.architecture import Architecture # type: ignore from pytket import Circuit, OpType from pytket.circuit import Node # type: ignore @@ -109,7 +109,7 @@ def test_LexiRouteRoutingMethod() -> None: nodes = [Node("test", 0), Node("test", 1), Node("test", 2)] test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]]) test_mm = MappingManager(test_a) - test_mm.route_circuit(test_c, [LexiRouteRoutingMethod()]) + test_mm.route_circuit(test_c, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) routed_commands = test_c.get_commands() assert routed_commands[0].op.type == OpType.CX @@ -158,6 +158,7 @@ def test_RoutingMethodCircuit_custom_list() -> None: RoutingMethodCircuit( route_subcircuit_func, check_subcircuit_func_false, 5, 5 ), + LexiLabellingMethod(), LexiRouteRoutingMethod(), ], ) @@ -179,6 +180,7 @@ def test_RoutingMethodCircuit_custom_list() -> None: RoutingMethodCircuit( route_subcircuit_func, check_subcircuit_func_true, 5, 5 ), + LexiLabellingMethod(), LexiRouteRoutingMethod(), ], ) diff --git a/pytket/tests/mitigation_test.py b/pytket/tests/mitigation_test.py index 7eb3032444..494b3993e1 100644 --- a/pytket/tests/mitigation_test.py +++ b/pytket/tests/mitigation_test.py @@ -17,7 +17,7 @@ from pytket.utils.spam import SpamCorrecter, compress_counts from pytket.circuit import Node, Circuit, Qubit # type: ignore -from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore +from pytket.mapping import MappingManager, LexiLabellingMethod, LexiRouteRoutingMethod # type: ignore from pytket.architecture import Architecture # type: ignore from pytket.placement import place_with_map # type: ignore from pytket.passes import DelayMeasures # type: ignore @@ -112,7 +112,7 @@ def test_spam_integration() -> None: place_with_map(bellcc, qmap) mm = MappingManager(arc) rbell = bellcc.copy() - mm.route_circuit(rbell, [LexiRouteRoutingMethod()]) + mm.route_circuit(rbell, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) def check_correction( counts0: Dict[Tuple[int, ...], int], counts1: Dict[Tuple[int, ...], int] @@ -509,7 +509,7 @@ def test_spam_routing() -> None: testc = Circuit(4, 4).H(0).CX(0, 3).CX(1, 2).CX(0, 1).CX(3, 2).measure_all() routed = testc.copy() mm = MappingManager(arc) - mm.route_circuit(routed, [LexiRouteRoutingMethod()]) + mm.route_circuit(routed, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) DelayMeasures().apply(routed) 
readout = routed.qubit_readout diff --git a/pytket/tests/predicates_test.py b/pytket/tests/predicates_test.py index c3ad2ff8a8..d5dd9f9f6c 100644 --- a/pytket/tests/predicates_test.py +++ b/pytket/tests/predicates_test.py @@ -74,7 +74,7 @@ CompilationUnit, UserDefinedPredicate, ) -from pytket.mapping import LexiRouteRoutingMethod # type: ignore +from pytket.mapping import LexiLabellingMethod, LexiRouteRoutingMethod # type: ignore from pytket.architecture import Architecture # type: ignore from pytket.placement import Placement, GraphPlacement # type: ignore from pytket.transform import Transform, PauliSynthStrat, CXConfigType # type: ignore @@ -214,7 +214,9 @@ def test_routing_and_placement_pass() -> None: assert seq_pass.apply(cu2) assert cu2.initial_map == expected_map - full_pass = FullMappingPass(arc, pl, config=[LexiRouteRoutingMethod()]) + full_pass = FullMappingPass( + arc, pl, config=[LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) cu3 = CompilationUnit(circ.copy()) assert full_pass.apply(cu3) assert cu3.initial_map == expected_map @@ -657,7 +659,9 @@ def sq(a: float, b: float, c: float) -> Circuit: [k.to_list(), v.to_list()] for k, v in qm.items() ] # FullMappingPass - fm_pass = FullMappingPass(arc, placer, config=[LexiRouteRoutingMethod()]) + fm_pass = FullMappingPass( + arc, placer, config=[LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) assert fm_pass.to_dict()["pass_class"] == "SequencePass" p_pass = fm_pass.get_sequence()[0] r_pass = fm_pass.get_sequence()[1] diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index 534adc2ea8..11839fc85d 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -21,6 +21,7 @@ endif() add_library(tket-${COMP} LexicographicalComparison.cpp LexiRoute.cpp + LexiLabelling.cpp MappingFrontier.cpp MappingManager.cpp MultiGateReorder.cpp diff --git a/tket/src/Mapping/LexiLabelling.cpp b/tket/src/Mapping/LexiLabelling.cpp new file mode 100644 index 0000000000..f748d94c2c --- /dev/null +++ b/tket/src/Mapping/LexiLabelling.cpp @@ -0,0 +1,78 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "Mapping/LexiLabelling.hpp" + +namespace tket { + +bool LexiLabellingMethod::check_method( + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + std::set already_checked; + // *it = {UnitID, {Vertex, Port}} + for (auto it = mapping_frontier->quantum_boundary->get().begin(); + it != mapping_frontier->quantum_boundary->get().end(); ++it) { + Edge e0 = mapping_frontier->circuit_.get_nth_out_edge( + it->second.first, it->second.second); + Vertex v0 = mapping_frontier->circuit_.target(e0); + Node node = Node(it->first); + // i.e. 
skip already checked vertices + if (already_checked.find(node) == already_checked.end()) { + already_checked.insert(node); + // for countercircuit_.n_in_edges_of_type(v0, EdgeType::Quantum); + int counter = 1; // 1 edge + auto jt = it; + ++jt; + while (jt != mapping_frontier->quantum_boundary->get().end() && + counter < n_edges) { + Edge e1 = mapping_frontier->circuit_.get_nth_out_edge( + jt->second.first, jt->second.second); + Vertex v1 = mapping_frontier->circuit_.target(e1); + if (v0 == v1) { + counter++; + // confirms that there is at least one multi-qubit gate in the first + // layer which is assigned to some Qubit not in the architecture + if (!architecture->node_exists(Node(it->first)) || + !architecture->node_exists(Node(jt->first))) { + return true; + } + } + ++jt; + } + } + } + return false; +} + +unit_map_t LexiLabellingMethod::routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + LexiRoute lr(architecture, mapping_frontier); + lr.solve_labelling(); + return {}; +} + +nlohmann::json LexiLabellingMethod::serialize() const { + nlohmann::json j; + j["name"] = "LexiLabellingMethod"; + return j; +} + +LexiLabellingMethod LexiLabellingMethod::deserialize( + const nlohmann::json& /*j*/) { + return LexiLabellingMethod(); +} + +} // namespace tket diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index ed3b8480cd..563d61fa38 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -189,34 +189,40 @@ void LexiRoute::set_interacting_uids(bool assigned_only) { it->second.first, it->second.second); Vertex v0 = this->mapping_frontier_->circuit_.target(e0); // should never be input vertex, so can always use in_edges - int n_edges = this->mapping_frontier_->circuit_.n_in_edges_of_type( - v0, EdgeType::Quantum); - if (n_edges == 2) { - auto jt = it; - ++jt; - for (; - jt != this->mapping_frontier_->quantum_boundary->get().end(); - ++jt) { - // i.e. if vertices match - Edge e1 = this->mapping_frontier_->circuit_.get_nth_out_edge( - jt->second.first, jt->second.second); - Vertex v1 = this->mapping_frontier_->circuit_.target(e1); - if (v0 == v1) { - // we can assume a qubit will only be in one interaction - // we can assume from how we iterate through pairs that each qubit - // will only be found in one match - if (!assigned_only || - (this->architecture_->node_exists(Node(it->first)) && - this->architecture_->node_exists(Node(jt->first)))) { - interacting_uids_.insert({it->first, jt->first}); - interacting_uids_.insert({jt->first, it->first}); + if (this->mapping_frontier_->circuit_.get_OpType_from_Vertex(v0) != + OpType::Barrier) { + int n_edges = this->mapping_frontier_->circuit_.n_in_edges_of_type( + v0, EdgeType::Quantum); + // make forwards = backwards + if (n_edges == 2) { + auto jt = it; + ++jt; + while (jt != + this->mapping_frontier_->quantum_boundary->get().end()) { + // i.e. 
if vertices match + Edge e1 = this->mapping_frontier_->circuit_.get_nth_out_edge( + jt->second.first, jt->second.second); + Vertex v1 = this->mapping_frontier_->circuit_.target(e1); + if (v0 == v1) { + // we can assume a qubit will only be in one interaction + // we can assume from how we iterate through pairs that each qubit + // will only be found in one match + if (!assigned_only || + (this->architecture_->node_exists(Node(it->first)) && + this->architecture_->node_exists(Node(jt->first)))) { + interacting_uids_.insert({it->first, jt->first}); + interacting_uids_.insert({jt->first, it->first}); + } } + ++jt; } + } else if ( + n_edges > 2 && + this->mapping_frontier_->circuit_.get_OpType_from_Vertex(v0) != + OpType::Barrier) { + throw LexiRouteError( + "LexiRoute only supports non-Barrier vertices with 1 or 2 edges."); } - } else if ( - n_edges > 2 && this->mapping_frontier_->circuit_.get_OpType_from_Vertex( - v0) != OpType::Barrier) { - TKET_ASSERT(!"Non-Barrier vertex should only have 1 or 2 edges."); } } } @@ -394,6 +400,12 @@ void LexiRoute::remove_swaps_decreasing(swap_set_t& swaps) { } } +void LexiRoute::solve_labelling() { + this->update_labelling(); + this->mapping_frontier_->update_quantum_boundary_uids(this->labelling_); + return; +} + void LexiRoute::solve(unsigned lookahead) { // store a copy of the original this->mapping_frontier_->quantum_boundray // this object will be updated and reset throughout the swap picking procedure @@ -403,22 +415,8 @@ void LexiRoute::solve(unsigned lookahead) { this->mapping_frontier_->quantum_boundary->get()) { copy.insert({pair.first, pair.second}); } - // some Qubits in boundary of this->mapping_frontier_->circuit_ may not be - // this->architecture_ Node If true, assign physical meaning by replacing with - // Node from this->architecture_ - // "candidate_swaps" are connected pairs of Node in this->architecture_ s.t. - // at least one is in an "interaction" and both are "assigned" i.e. 
present in - // this->mapping_frontier_->circuit - - bool updated = this->update_labelling(); - if (updated) { - // update unit id at boundary in case of relabelling - this->mapping_frontier_->update_quantum_boundary_uids(this->labelling_); - return; - } swap_set_t candidate_swaps = this->get_candidate_swaps(); this->remove_swaps_decreasing(candidate_swaps); - TKET_ASSERT(candidate_swaps.size() != 0); // Only want to substitute a single swap // check next layer of interacting qubits and remove swaps until only one @@ -510,8 +508,28 @@ LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) : max_depth_(_max_depth){}; bool LexiRouteRoutingMethod::check_method( - const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const { + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + std::set unplaced; + for (const std::pair& pair : + mapping_frontier->quantum_boundary->get()) { + // only supports single qubit, two-qubit gates, barrier gates + // and BRIDGE gates added by the routing code + if ((mapping_frontier->circuit_.n_in_edges_of_type( + pair.second.first, EdgeType::Quantum) > 2 && + mapping_frontier->circuit_.get_OpType_from_Vertex(pair.second.first) != + OpType::BRIDGE)) { + return false; + } else if (!architecture->node_exists(Node(pair.first))) { + // if multi-qubit vertex doesn't have all edges in frontier then + // won't be check in routing_method anyway + if (unplaced.find(pair.second.first) == unplaced.end()) { + unplaced.insert(pair.second.first); + } else { + return false; + } + } + } return true; } diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 1f9479c89f..b2d3ada7fd 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -14,6 +14,8 @@ #include "Mapping/RoutingMethodJson.hpp" +#include "Mapping/LexiLabelling.hpp" + namespace tket { void to_json(nlohmann::json& j, const RoutingMethod& rm) { j = rm.serialize(); } @@ -31,6 +33,10 @@ void to_json(nlohmann::json& j, const std::vector& rmp_v) { void from_json(const nlohmann::json& j, std::vector& rmp_v) { for (const auto& c : j) { std::string name = c.at("name").get(); + if (name == "LexiLabellingMethod") { + rmp_v.push_back(std::make_shared( + LexiLabellingMethod::deserialize(c))); + } if (name == "LexiRouteRoutingMethod") { rmp_v.push_back(std::make_shared( LexiRouteRoutingMethod::deserialize(c))); diff --git a/tket/src/Mapping/include/Mapping/LexiLabelling.hpp b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp new file mode 100644 index 0000000000..59fd7f756a --- /dev/null +++ b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp @@ -0,0 +1,52 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class LexiLabellingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods redefined for dynamically assigning qubits to + * some Architecture. + */ + LexiLabellingMethod(){}; + + /** + * @return true if method can label unlabelled qubits + */ + bool check_method( + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. + * + */ + unit_map_t routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static LexiLabellingMethod deserialize(const nlohmann::json& j); +}; +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp index cbd2d7b4e2..fe0c244880 100644 --- a/tket/src/Mapping/include/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -50,10 +50,7 @@ class LexiRoute { * MappingFrontier object passed at class construction. Either a SWAP gate * will be inserted at the input boundary of the held Circuit or a CX gate * will be transformed into a BRIDGE gate. The added SWAP or BRIDGE gate will - * be valid for the Architecture passed at class construction. Additionally, - * an "unlabelled" Qubit in the Circuit may be relabelled to a Node in the - * Architecture, or an "unlabelled" Qubit may have its path merged with an - * ancilla qubit. + * be valid for the Architecture passed at class construction. * The decision making is based on the heuristic outlined in arXiv:1902.08091. * * @param lookahead Number of slices to lookahead at when determining best @@ -61,6 +58,14 @@ class LexiRoute { */ void solve(unsigned lookahead); + /** + * When called an "unlabelled" Qubit in the Circuit may be relabelled to a + * Node in the Architecture, or an "unlabelled" Qubit may have its path merged + * with an ancilla qubit. The decision making is based on the heuristic + * outlined in arXiv:1902.08091. 
+ */ + void solve_labelling(); + private: /** * this->interacting_uids_ attribute is a map where key is one UnitID diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index d54f752be8..e383798c93 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -20,8 +20,9 @@ #include "Circuit/CircPool.hpp" #include "Circuit/Circuit.hpp" #include "Converters/PhasePoly.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" -#include "Mapping/RoutingMethod.hpp" #include "Placement/Placement.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" @@ -192,9 +193,10 @@ PassPtr gen_full_mapping_pass( } PassPtr gen_default_mapping_pass(const Architecture& arc) { - PlacementPtr pp = std::make_shared(arc); - RoutingMethodPtr rmw = std::make_shared(100); - return gen_full_mapping_pass(arc, pp, {rmw}); + return gen_full_mapping_pass( + arc, std::make_shared(arc), + {std::make_shared(), + std::make_shared(100)}); } PassPtr gen_cx_mapping_pass( diff --git a/tket/tests/test_CompilerPass.cpp b/tket/tests/test_CompilerPass.cpp index 7342b4820a..9768d7ee80 100644 --- a/tket/tests/test_CompilerPass.cpp +++ b/tket/tests/test_CompilerPass.cpp @@ -16,6 +16,8 @@ #include #include "Circuit/Circuit.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" #include "OpType/OpType.hpp" #include "OpType/OpTypeFunctions.hpp" #include "Placement/Placement.hpp" @@ -252,9 +254,10 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { CompilationUnit cu(circ, preds); PlacementPtr pp = std::make_shared(grid); - LexiRouteRoutingMethod lrrm(50); - RoutingMethodPtr rmw = std::make_shared(lrrm); - PassPtr cp_route = gen_full_mapping_pass(grid, pp, {rmw}); + PassPtr cp_route = gen_full_mapping_pass( + grid, pp, + {std::make_shared(), + std::make_shared(50)}); PassPtr all_passes = SynthesiseHQS() >> SynthesiseOQC() >> SynthesiseUMD() >> SynthesiseTket() >> cp_route; @@ -900,9 +903,9 @@ SCENARIO("DecomposeArbitrarilyControlledGates test") { SCENARIO("Precomposed passes successfully compose") { GIVEN("gen_directed_cx_routing_pass") { RingArch arc(6); - LexiRouteRoutingMethod lrrm(50); - RoutingMethodPtr rmw = std::make_shared(lrrm); - REQUIRE_NOTHROW(gen_directed_cx_routing_pass(arc, {rmw})); + REQUIRE_NOTHROW(gen_directed_cx_routing_pass( + arc, {std::make_shared(), + std::make_shared(50)})); } } @@ -923,9 +926,9 @@ SCENARIO("Test Pauli Graph Synthesis Pass") { SCENARIO("Compose Pauli Graph synthesis Passes") { RingArch arc(10); - LexiRouteRoutingMethod lrrm(50); - RoutingMethodPtr rmw = std::make_shared(lrrm); - PassPtr dir_pass = gen_directed_cx_routing_pass(arc, {rmw}); + PassPtr dir_pass = gen_directed_cx_routing_pass( + arc, {std::make_shared(), + std::make_shared(50)}); GIVEN("Special UCC Synthesis") { PassPtr spec_ucc = gen_special_UCC_synthesis(); REQUIRE_NOTHROW(spec_ucc >> dir_pass); @@ -1008,9 +1011,10 @@ SCENARIO("Commute measurements to the end of a circuit") { Architecture line({{0, 1}, {1, 2}, {2, 3}}); PlacementPtr pp = std::make_shared(line); - LexiRouteRoutingMethod lrrm(50); - RoutingMethodPtr rmw = std::make_shared(lrrm); - PassPtr route_pass = gen_full_mapping_pass(line, pp, {rmw}); + PassPtr route_pass = gen_full_mapping_pass( + line, pp, + {std::make_shared(), + std::make_shared(50)}); CompilationUnit cu(test); route_pass->apply(cu); REQUIRE(delay_pass->apply(cu)); @@ -1079,10 +1083,11 @@ SCENARIO("CX 
mapping pass") { REQUIRE(is_classical_map(c_placed)); // Route - LexiRouteRoutingMethod lrrm(50); - RoutingMethodPtr rmw = std::make_shared(lrrm); CompilationUnit cu_route(c_placed); - gen_routing_pass(line, {rmw})->apply(cu_route); + gen_routing_pass( + line, {std::make_shared(), + std::make_shared(50)}) + ->apply(cu_route); const Circuit& c_routed = cu_route.get_circ_ref(); // Rebase again diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index 1b24da5da9..2442501d45 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -1,5 +1,6 @@ #include +#include "Mapping/LexiLabelling.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" #include "Predicates/CompilationUnit.hpp" @@ -8,7 +9,8 @@ #include "Predicates/PassLibrary.hpp" namespace tket { -SCENARIO("Test LexiRoute::solve") { + +SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::vector nodes = {Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), @@ -75,8 +77,8 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf0 = std::make_shared(circ); LexiRoute lr(shared_arc, mf0); - - lr.solve(4); + lr.solve_labelling(); + // lr.solve(4); REQUIRE(mf0->circuit_.n_gates() == 3); @@ -86,7 +88,9 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf1 = std::make_shared(circ); LexiRoute lr1(shared_arc, mf1); + // lr1.solve_labelling(); lr1.solve(4); + std::vector commands = mf1->circuit_.get_commands(); Command swap_c = commands[1]; unit_vector_t uids = {nodes[1], nodes[2]}; @@ -138,7 +142,8 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr0(shared_arc, mf); - lr0.solve(20); + lr0.solve_labelling(); + // lr0.solve(20); std::vector commands = mf->circuit_.get_commands(); REQUIRE(commands.size() == 4); Command c = commands[0]; @@ -147,19 +152,19 @@ SCENARIO("Test LexiRoute::solve") { mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); - lr1.solve(20); + lr1.solve_labelling(); uids = {nodes[2], nodes[3]}; REQUIRE(mf->circuit_.get_commands()[1].get_args() == uids); mf->advance_frontier_boundary(shared_arc); LexiRoute lr2(shared_arc, mf); - lr2.solve(20); + lr2.solve_labelling(); uids = {nodes[2], nodes[5]}; REQUIRE(mf->circuit_.get_commands()[2].get_args() == uids); mf->advance_frontier_boundary(shared_arc); LexiRoute lr3(shared_arc, mf); - lr3.solve(20); + lr3.solve_labelling(); uids = {nodes[5], nodes[6]}; REQUIRE(mf->circuit_.get_commands()[3].get_args() == uids); } @@ -283,7 +288,7 @@ SCENARIO("Test LexiRoute::solve") { mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); - lr1.solve(20); + lr1.solve_labelling(); REQUIRE(circ.all_qubits()[0] == nodes[3]); } @@ -319,7 +324,7 @@ SCENARIO("Test LexiRoute::solve") { std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr0(shared_arc, mf); - lr0.solve(20); + lr0.solve_labelling(); mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); @@ -355,8 +360,9 @@ SCENARIO("Test LexiRoute::solve") { std::make_shared(circ); mf->ancilla_nodes_.insert(nodes[3]); mf->advance_frontier_boundary(shared_arc); + LexiRoute lr0(shared_arc, mf); - lr0.solve(20); + lr0.solve_labelling(); REQUIRE(circ.all_qubits()[1] == nodes[4]); REQUIRE(circ.all_qubits()[0] == nodes[3]); @@ -394,8 +400,8 @@ SCENARIO("Test LexiRoute::solve") { REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } GIVEN( - "Labelling is required, 
but there are no free remaining qubits, for one " - "updated label, order 0.") { + "Labelling is required, but there are no free remaining qubits, for" + "one updated label, order 0.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -412,11 +418,11 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for one " - "updated label, order 1.") { + "Labelling is required, but there are no free remaining qubits, for " + " one updated label, order 1.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -433,11 +439,11 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for two " - "updated labels.") { + "Labelling is required, but there are no free remaining qubits, for" + "two updated labels.") { Circuit circ(10); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[9], qubits[8]}); @@ -454,10 +460,244 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } } +SCENARIO("Test LexiLabellingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4)}; + + // straight line + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("No qubit to label, empty frontier, check_method.") { + Circuit circ(5); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("No qubit to label, partially filled frontier, check_method.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("Qubit to label, but casually restricted, check_method.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN( + "Two Qubit to label in future slice, causally restricted, " + "check_method.") { + Circuit 
circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[4]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("Three Qubit Gate, all labelled, first slice, check_method.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CCX, {qubits[1], qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("One unlabelled qubit, one slice, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label = + mf->quantum_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label == post_label); + } + GIVEN( + "One unlabelled qubit, two slices, lookahead for better solution, check" + " and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label = + mf->quantum_boundary->get().find(qubits[2])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[2]) == + mf->quantum_boundary->get().end()); + VertPort post_label = + mf->quantum_boundary->get().find(nodes[2])->second; + REQUIRE(pre_label == post_label); + } + GIVEN("Two unlabelled qubits, one slice, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); + + std::map rename_map = { + {qubits[2], nodes[2]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label_0 = + mf->quantum_boundary->get().find(qubits[0])->second; + VertPort pre_label_3 = + mf->quantum_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[0]) == + 
mf->quantum_boundary->get().end()); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label_0 = + mf->quantum_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } + GIVEN("Two unlabelled qubits, two slices, lookahead, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[2], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[4]}); + + std::map rename_map = { + {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label_0 = + mf->quantum_boundary->get().find(qubits[2])->second; + VertPort pre_label_3 = + mf->quantum_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[2]) == + mf->quantum_boundary->get().end()); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label_0 = + mf->quantum_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } + GIVEN( + "Two unlabelled qubits, two slices, lookahead unrouted, check and " + "route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[2], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[0]}); + + std::map rename_map = { + {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label_0 = + mf->quantum_boundary->get().find(qubits[2])->second; + VertPort pre_label_3 = + mf->quantum_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[2]) == + mf->quantum_boundary->get().end()); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label_0 = + mf->quantum_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } +} SCENARIO("Test LexiRouteRoutingMethod") { std::vector nodes = { Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), @@ -560,7 +800,7 @@ SCENARIO("Test LexiRouteRoutingMethod") { REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } } -SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { +SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { GIVEN("11 Node Architecture, 11 Qubit circuit, multiple SWAP required.") { std::vector nodes = { Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), @@ -589,7 +829,7 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { ArchitecturePtr shared_arc = std::make_shared(architecture); Circuit circ(11); std::vector qubits = circ.all_qubits(); - for (unsigned i = 0; i < 10; 
i++) { + for (unsigned i = 0; i < 11; i++) { circ.add_op(OpType::CX, {qubits[0], qubits[4]}); circ.add_op(OpType::CX, {qubits[6], qubits[7]}); circ.add_op(OpType::CX, {qubits[1], qubits[10]}); @@ -611,8 +851,11 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { std::shared_ptr mf = std::make_shared(copy_circ); + LexiLabellingMethod lrm; std::vector vrm = { + std::make_shared(lrm), std::make_shared(100)}; + REQUIRE(vrm[0]->check_method(mf, shared_arc)); bool res = mm.route_circuit(circ, vrm); @@ -640,7 +883,9 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(sg, false); MappingManager mm(shared_arc); + LexiLabellingMethod lrm; std::vector vrm = { + std::make_shared(lrm), std::make_shared(100)}; bool res = mm.route_circuit(circ, vrm); diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index acc586497d..d68cd25df6 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -24,6 +24,7 @@ #include "CircuitsForTesting.hpp" #include "Converters/PhasePoly.hpp" #include "Gate/SymTable.hpp" +#include "Mapping/LexiLabelling.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/RoutingMethod.hpp" #include "OpType/OpType.hpp" @@ -35,6 +36,7 @@ #include "Transformations/Transform.hpp" #include "Utils/Json.hpp" #include "testutil.hpp" + namespace tket { namespace test_json { @@ -434,14 +436,19 @@ SCENARIO("Test RoutingMethod serializations") { std::vector rmp = { std::make_shared(rm), + std::make_shared(), std::make_shared(5)}; + nlohmann::json rmp_j = rmp; std::vector loaded_rmp_j = rmp_j.get>(); CHECK(!loaded_rmp_j[0]->check_method( std::make_shared(c), std::make_shared(2, 2))); - CHECK(loaded_rmp_j[1]->check_method( + CHECK(!loaded_rmp_j[1]->check_method( + std::make_shared(c), + std::make_shared(2, 2))); + CHECK(loaded_rmp_j[2]->check_method( std::make_shared(c), std::make_shared(2, 2))); } From 2bc26c0bc1dfcf563c679c4424396e81a9663adf Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Thu, 10 Feb 2022 15:01:37 +0000 Subject: [PATCH 054/146] [1795] RoutingMethod for Box Decompositoin (#212) * Reject boxes in Architecture::valid_operation * Add `next_q_cut` method to a quantum cut Only consider quantum edges * Use `next_q_cut` in `advance_frontier_boundary` * Add BoxDecompositionRoutingMethod * Add tests * Reformat * Reject boxes in LexiRouteMethod::check_method * Update tests * Add JSON serialisation * Handle unused arguments * Refactor Circuit::decompose_boxes * fix naming * update checking method for LexiRoute * Update lexilabelling check_method Co-authored-by: Yao Tang --- tket/src/Architecture/Architecture.cpp | 7 +- tket/src/Architecture/CMakeLists.txt | 3 + .../include/Architecture/Architecture.hpp | 5 +- tket/src/Circuit/include/Circuit/Circuit.hpp | 12 ++ tket/src/Circuit/macro_circ_info.cpp | 38 +++++ tket/src/Circuit/macro_manipulation.cpp | 39 +++-- tket/src/Mapping/BoxDecomposition.cpp | 71 +++++++++ tket/src/Mapping/CMakeLists.txt | 1 + tket/src/Mapping/LexiLabelling.cpp | 46 +++--- tket/src/Mapping/LexiRoute.cpp | 43 ++++-- tket/src/Mapping/MappingFrontier.cpp | 16 +- tket/src/Mapping/MultiGateReorder.cpp | 3 +- tket/src/Mapping/RoutingMethodJson.cpp | 3 + .../include/Mapping/BoxDecomposition.hpp | 63 ++++++++ .../include/Mapping/MultiGateReorder.hpp | 4 +- .../include/Mapping/RoutingMethodJson.hpp | 1 + tket/tests/Circuit/test_Circ.cpp | 29 ++++ tket/tests/test_BoxDecompRoutingMethod.cpp | 138 
++++++++++++++++++ tket/tests/test_MappingFrontier.cpp | 30 ++++ tket/tests/test_MultiGateReorder.cpp | 18 ++- tket/tests/test_json.cpp | 5 +- tket/tests/tkettestsfiles.cmake | 1 + 22 files changed, 486 insertions(+), 90 deletions(-) create mode 100644 tket/src/Mapping/BoxDecomposition.cpp create mode 100644 tket/src/Mapping/include/Mapping/BoxDecomposition.hpp create mode 100644 tket/tests/test_BoxDecompRoutingMethod.cpp diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index 479f734f08..3c88bb5081 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -27,7 +27,12 @@ namespace tket { // basic implementation that works off same prior assumptions // TODO: Update this for more mature systems of multi-qubit gates bool Architecture::valid_operation( - /*const OpType& optype, */ const std::vector& uids) const { + const Op_ptr& op, const std::vector& uids) const { + if (op->get_desc().is_box() || + (op->get_type() == OpType::Conditional && + static_cast(*op).get_op()->get_desc().is_box())) + return false; + if (uids.size() == 1) { // TODO: for simple case here this should probably not pass if // node_exists[uids[0]] == FALSE, but should be fine for now? diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index 30f3b33135..c49a5a02ad 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -23,6 +23,9 @@ add_library(tket-${COMP} ArchitectureGraphClasses.cpp) list(APPEND DEPS_${COMP} + Circuit + OpType + Ops Graphs Utils) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index b7c3d975d0..4d76614133 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -21,8 +21,10 @@ #include #include +#include "Circuit/Conditional.hpp" #include "Graphs/CompleteGraph.hpp" #include "Graphs/DirectedGraph.hpp" +#include "Ops/OpPtr.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/Json.hpp" @@ -102,8 +104,7 @@ class Architecture : public ArchitectureBase> { */ node_set_t get_articulation_points(const Architecture &subarc) const; - bool valid_operation( - /*const OpType& optype, */ const std::vector &uids) const; + bool valid_operation(const Op_ptr &op, const std::vector &uids) const; /** * Sub-architecture generated by a subset of nodes. diff --git a/tket/src/Circuit/include/Circuit/Circuit.hpp b/tket/src/Circuit/include/Circuit/Circuit.hpp index 3d24de524a..dbd41d81c8 100644 --- a/tket/src/Circuit/include/Circuit/Circuit.hpp +++ b/tket/src/Circuit/include/Circuit/Circuit.hpp @@ -1059,6 +1059,11 @@ class Circuit { std::shared_ptr b_frontier, const std::function &skip_func) const; + // given current slice of quantum frontier, returns the next slice. + // ignore classical and boolean edges + CutFrontier next_q_cut( + std::shared_ptr u_frontier) const; + /** * Depth of circuit. 
* @@ -1375,6 +1380,13 @@ class Circuit { */ Circuit conditional_circuit(const bit_vector_t &bits, unsigned value) const; + /** + * Replaces one \ref vertex by applying \ref Box::to_circuit + * + * @return whether the vertex holds a box or a conditional box + */ + bool substitute_box_vertex(Vertex &vert, VertexDeletion vertex_deletion); + /** * Replaces each \ref Box operation by applying \ref Box::to_circuit * diff --git a/tket/src/Circuit/macro_circ_info.cpp b/tket/src/Circuit/macro_circ_info.cpp index e453e79cca..76b35732ec 100644 --- a/tket/src/Circuit/macro_circ_info.cpp +++ b/tket/src/Circuit/macro_circ_info.cpp @@ -517,6 +517,44 @@ CutFrontier Circuit::next_cut( get_next_b_frontier(*this, b_frontier, u_frontier, next_slice_lookup)}; } +CutFrontier Circuit::next_q_cut( + std::shared_ptr u_frontier) const { + auto next_slice = std::make_shared(); + VertexSet next_slice_lookup; + VertexSet bad_vertices; + EdgeSet edge_lookup; + for (const std::pair& pair : u_frontier->get()) { + edge_lookup.insert(pair.second); + } + + // find the next slice first + for (const std::pair& pair : u_frontier->get()) { + Vertex try_v = target(pair.second); + if (detect_final_Op(try_v)) continue; + if (next_slice_lookup.find(try_v) != next_slice_lookup.end()) + continue; // already going to be in next slice + bool good_vertex = bad_vertices.find(try_v) == bad_vertices.end(); + if (!good_vertex) continue; + EdgeVec ins = get_in_edges(try_v); + for (const Edge& in : ins) { + if (edge_lookup.find(in) == edge_lookup.end() && + get_edgetype(in) == EdgeType::Quantum) { + good_vertex = false; + bad_vertices.insert(try_v); + break; + } + } + if (good_vertex) { + next_slice_lookup.insert(try_v); + next_slice->push_back(try_v); + } + } + + return { + next_slice, get_next_u_frontier(*this, u_frontier, next_slice_lookup), + std::make_shared()}; +} + SliceVec Circuit::get_reverse_slices() const { vertex_map_t mapping; vertex_map_t rev_mapping; diff --git a/tket/src/Circuit/macro_manipulation.cpp b/tket/src/Circuit/macro_manipulation.cpp index 3f6c587cbd..c587b38fa2 100644 --- a/tket/src/Circuit/macro_manipulation.cpp +++ b/tket/src/Circuit/macro_manipulation.cpp @@ -629,27 +629,34 @@ Circuit Circuit::conditional_circuit( return cond_circ; } +bool Circuit::substitute_box_vertex( + Vertex& vert, VertexDeletion vertex_deletion) { + Op_ptr op = get_Op_ptr_from_Vertex(vert); + bool conditional = op->get_type() == OpType::Conditional; + if (conditional) { + const Conditional& cond = static_cast(*op); + op = cond.get_op(); + } + if (!op->get_desc().is_box()) return false; + const Box& b = static_cast(*op); + Circuit replacement = *b.to_circuit(); + if (conditional) { + substitute_conditional( + replacement, vert, vertex_deletion, OpGroupTransfer::Merge); + } else { + substitute(replacement, vert, vertex_deletion, OpGroupTransfer::Merge); + } + return true; +} + bool Circuit::decompose_boxes() { bool success = false; VertexList bin; BGL_FORALL_VERTICES(v, dag, DAG) { - Op_ptr op = get_Op_ptr_from_Vertex(v); - bool conditional = op->get_type() == OpType::Conditional; - if (conditional) { - const Conditional& cond = static_cast(*op); - op = cond.get_op(); - } - if (!op->get_desc().is_box()) continue; - const Box& b = static_cast(*op); - Circuit replacement = *b.to_circuit(); - if (conditional) { - substitute_conditional( - replacement, v, VertexDeletion::No, OpGroupTransfer::Merge); - } else { - substitute(replacement, v, VertexDeletion::No, OpGroupTransfer::Merge); + if (substitute_box_vertex(v, VertexDeletion::No)) { + 
bin.push_back(v); + success = true; } - bin.push_back(v); - success = true; } remove_vertices(bin, GraphRewiring::No, VertexDeletion::Yes); return success; diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp new file mode 100644 index 0000000000..cd52143ece --- /dev/null +++ b/tket/src/Mapping/BoxDecomposition.cpp @@ -0,0 +1,71 @@ +#include "Mapping/BoxDecomposition.hpp" + +#include "Mapping/MappingFrontier.hpp" + +namespace tket { + +BoxDecomposition::BoxDecomposition( + const ArchitecturePtr &_architecture, + std::shared_ptr &_mapping_frontier) + : architecture_(_architecture), mapping_frontier_(_mapping_frontier) {} + +void BoxDecomposition::solve() { + // Box type vertices are later removed from DAG + VertexList bin; + + std::shared_ptr frontier_edges = + frontier_convert_vertport_to_edge( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->quantum_boundary); + CutFrontier next_cut = + this->mapping_frontier_->circuit_.next_q_cut(frontier_edges); + for (Vertex &vert : *next_cut.slice) { + if (this->mapping_frontier_->circuit_.substitute_box_vertex( + vert, Circuit::VertexDeletion::No)) + bin.push_back(vert); + } + + // Delete vertices + this->mapping_frontier_->circuit_.remove_vertices( + bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); +} + +BoxDecompositionRoutingMethod::BoxDecompositionRoutingMethod(){}; + +bool BoxDecompositionRoutingMethod::check_method( + const std::shared_ptr &mapping_frontier, + const ArchitecturePtr & /*architecture*/) const { + std::shared_ptr frontier_edges = + frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->quantum_boundary); + CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); + for (const Vertex &vert : *next_cut.slice) { + Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); + if (op->get_desc().is_box() || + (op->get_type() == OpType::Conditional && + static_cast(*op).get_op()->get_desc().is_box())) + return true; + } + return false; +} + +unit_map_t BoxDecompositionRoutingMethod::routing_method( + std::shared_ptr &mapping_frontier, + const ArchitecturePtr &architecture) const { + BoxDecomposition bd(architecture, mapping_frontier); + bd.solve(); + return {}; +} + +nlohmann::json BoxDecompositionRoutingMethod::serialize() const { + nlohmann::json j; + j["name"] = "BoxDecompositionRoutingMethod"; + return j; +} + +BoxDecompositionRoutingMethod BoxDecompositionRoutingMethod::deserialize( + const nlohmann::json & /*j*/) { + return BoxDecompositionRoutingMethod(); +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index 11839fc85d..84fa8626b5 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -25,6 +25,7 @@ add_library(tket-${COMP} MappingFrontier.cpp MappingManager.cpp MultiGateReorder.cpp + BoxDecomposition.cpp RoutingMethodCircuit.cpp RoutingMethodJson.cpp Verification.cpp) diff --git a/tket/src/Mapping/LexiLabelling.cpp b/tket/src/Mapping/LexiLabelling.cpp index f748d94c2c..1524c98aac 100644 --- a/tket/src/Mapping/LexiLabelling.cpp +++ b/tket/src/Mapping/LexiLabelling.cpp @@ -18,38 +18,26 @@ namespace tket { bool LexiLabellingMethod::check_method( const std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const { - std::set already_checked; - // *it = {UnitID, {Vertex, Port}} - for (auto it = mapping_frontier->quantum_boundary->get().begin(); - it != 
mapping_frontier->quantum_boundary->get().end(); ++it) { - Edge e0 = mapping_frontier->circuit_.get_nth_out_edge( - it->second.first, it->second.second); - Vertex v0 = mapping_frontier->circuit_.target(e0); - Node node = Node(it->first); - // i.e. skip already checked vertices - if (already_checked.find(node) == already_checked.end()) { - already_checked.insert(node); - // for countercircuit_.n_in_edges_of_type(v0, EdgeType::Quantum); - int counter = 1; // 1 edge - auto jt = it; - ++jt; - while (jt != mapping_frontier->quantum_boundary->get().end() && - counter < n_edges) { - Edge e1 = mapping_frontier->circuit_.get_nth_out_edge( - jt->second.first, jt->second.second); - Vertex v1 = mapping_frontier->circuit_.target(e1); - if (v0 == v1) { - counter++; - // confirms that there is at least one multi-qubit gate in the first - // layer which is assigned to some Qubit not in the architecture - if (!architecture->node_exists(Node(it->first)) || - !architecture->node_exists(Node(jt->first))) { + std::shared_ptr frontier_edges = + frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->quantum_boundary); + CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); + + for (const Vertex& vert : *next_cut.slice) { + EdgeVec ev = mapping_frontier->circuit_.get_in_edges_of_type( + vert, EdgeType::Quantum); + // lexilabelling can't support dynamic labelling of >2 qubit gates + if (ev.size() > 2) { + return false; + } + for (const Edge& e : ev) { + for (const std::pair& pair : + frontier_edges->get()) { + if (pair.second == e) { + if (!architecture->node_exists(Node(pair.first))) { return true; } } - ++jt; } } } diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index 563d61fa38..ef49ae41e1 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -510,23 +510,36 @@ LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) bool LexiRouteRoutingMethod::check_method( const std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const { - std::set unplaced; - for (const std::pair& pair : - mapping_frontier->quantum_boundary->get()) { - // only supports single qubit, two-qubit gates, barrier gates - // and BRIDGE gates added by the routing code + std::shared_ptr frontier_edges = + frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->quantum_boundary); + CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); + for (const Vertex& vert : *next_cut.slice) { + Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); + // can't work wih box ops, or gates with more than 2 qubits that aren't a + // BRIDGE + if ((mapping_frontier->circuit_.n_in_edges_of_type( - pair.second.first, EdgeType::Quantum) > 2 && - mapping_frontier->circuit_.get_OpType_from_Vertex(pair.second.first) != - OpType::BRIDGE)) { + vert, EdgeType::Quantum) > 2 && + op->get_type() != OpType::BRIDGE) || + (op->get_desc().is_box() || (op->get_type() == OpType::Conditional && + static_cast(*op) + .get_op() + ->get_desc() + .is_box()))) { return false; - } else if (!architecture->node_exists(Node(pair.first))) { - // if multi-qubit vertex doesn't have all edges in frontier then - // won't be check in routing_method anyway - if (unplaced.find(pair.second.first) == unplaced.end()) { - unplaced.insert(pair.second.first); - } else { - return false; + } else { + // second check that all input UnitID are actually in architecture + for (const Edge& e : 
mapping_frontier->circuit_.get_in_edges_of_type( + vert, EdgeType::Quantum)) { + for (const std::pair& pair : + frontier_edges->get()) { + if (pair.second == e) { + if (!architecture->node_exists(Node(pair.first))) { + return false; + } + } + } } } } diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 86fd62bc8c..e435dc4261 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -218,19 +218,8 @@ void MappingFrontier::advance_frontier_boundary( std::shared_ptr frontier_edges = frontier_convert_vertport_to_edge( this->circuit_, this->quantum_boundary); - // Add all classical edges that share the same target - unsigned dummy_bit_index = 0; - for (const std::pair& pair : frontier_edges->get()) { - Vertex vert = this->circuit_.target(pair.second); - for (const Edge& e : - this->circuit_.get_in_edges_of_type(vert, EdgeType::Classical)) { - frontier_edges->insert({Bit(dummy_bit_index), e}); - dummy_bit_index++; - } - } - CutFrontier next_cut = this->circuit_.next_cut( - frontier_edges, std::make_shared()); + CutFrontier next_cut = this->circuit_.next_q_cut(frontier_edges); // For each vertex in a slice, if its physically permitted, update // quantum_boundary with quantum out edges from vertex (i.e. @@ -252,8 +241,7 @@ void MappingFrontier::advance_frontier_boundary( nodes.push_back(Node(uid)); } if (architecture->valid_operation( - /* this->circuit_.get_OpType_from_Vertex(vert), */ - nodes) || + this->circuit_.get_Op_ptr_from_Vertex(vert), nodes) || this->circuit_.get_OpType_from_Vertex(vert) == OpType::Barrier) { // if no valid operation, boundary not updated and while loop terminates boundary_updated = true; diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index c05ea06122..59f052c2bc 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -67,8 +67,9 @@ bool is_physically_permitted( for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); } + Op_ptr op = frontier->circuit_.get_Op_ptr_from_Vertex(vert); - return arc_ptr->valid_operation(nodes); + return arc_ptr->valid_operation(op, nodes); } // This method will try to commute a vertex to the quantum frontier diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index b2d3ada7fd..86eac10524 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -45,6 +45,9 @@ void from_json(const nlohmann::json& j, std::vector& rmp_v) { } else if (name == "MultiGateReorderRoutingMethod") { rmp_v.push_back(std::make_shared( MultiGateReorderRoutingMethod::deserialize(c))); + } else if (name == "BoxDecompositionRoutingMethod") { + rmp_v.push_back(std::make_shared( + BoxDecompositionRoutingMethod::deserialize(c))); } else { std::logic_error( "Deserialization for given RoutingMethod not supported."); diff --git a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp new file mode 100644 index 0000000000..8b1cd45fa4 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp @@ -0,0 +1,63 @@ +#ifndef _TKET_BoxDecomposition_H_ +#define _TKET_BoxDecomposition_H_ + +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class BoxDecomposition { + public: + /** + * Class Constructor + * @param _architecture Architecture 
object added operations must respect + * @param _mapping_frontier Contains Circuit object to be modified + */ + BoxDecomposition( + const ArchitecturePtr& _architecture, + std::shared_ptr& _mapping_frontier); + + /** + * Decompose any boxes in the next slice after the frontier + */ + void solve(); + + private: + // Architecture all new physical operations must respect + ArchitecturePtr architecture_; + std::shared_ptr mapping_frontier_; +}; + +class BoxDecompositionRoutingMethod : public RoutingMethod { + public: + /** + * Decompose any boxes on the frontier + */ + BoxDecompositionRoutingMethod(); + + /** + * @return true if method can route subcircuit, false if not + */ + bool check_method( + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& /*architecture*/) const override; + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. + * + */ + unit_map_t routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static BoxDecompositionRoutingMethod deserialize(const nlohmann::json& /*j*/); +}; + +} // namespace tket + +#endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp index cb7a51c300..317cf9b6d7 100644 --- a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -59,8 +59,8 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { * @return true if method can route subcircuit, false if not */ bool check_method( - const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const override; + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp index 9cbdb22e90..0a12ed92ff 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -14,6 +14,7 @@ #pragma once +#include "Mapping/BoxDecomposition.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/MultiGateReorder.hpp" #include "Mapping/RoutingMethod.hpp" diff --git a/tket/tests/Circuit/test_Circ.cpp b/tket/tests/Circuit/test_Circ.cpp index 7d0f43f48b..d60b463a98 100644 --- a/tket/tests/Circuit/test_Circ.cpp +++ b/tket/tests/Circuit/test_Circ.cpp @@ -1288,6 +1288,35 @@ SCENARIO("Test next slice") { } } +SCENARIO("Test next quantum slice") { + GIVEN("A simple circuit") { + Circuit circ(3, 1); + Vertex v1 = circ.add_op(OpType::X, {0}); + Vertex v2 = + circ.add_conditional_gate(OpType::Rx, {0.6}, {1}, {0}, 1); + Vertex v3 = + circ.add_conditional_gate(OpType::Ry, {0.6}, {2}, {0}, 1); + Vertex v4 = circ.add_op(OpType::S, {2}); + Vertex v5 = circ.add_op(OpType::T, {1}); + + auto frontier = std::make_shared(); + for (const Qubit& q : circ.all_qubits()) { + Vertex in = circ.get_in(q); + frontier->insert({q, circ.get_nth_out_edge(in, 0)}); + } + CutFrontier slice_front = circ.next_q_cut(frontier); + Slice sl = *slice_front.slice; + WHEN("The frontier is calculated from inputs") { + THEN("The first slice is recovered accurately.") { + 
REQUIRE(sl.size() == 3); + REQUIRE(sl[0] == v1); + REQUIRE(sl[1] == v2); + REQUIRE(sl[2] == v3); + } + } + } +} + SCENARIO("Test circuit.transpose() method") { GIVEN("Simple circuit") { Circuit circ(2); diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp new file mode 100644 index 0000000000..41577f9034 --- /dev/null +++ b/tket/tests/test_BoxDecompRoutingMethod.cpp @@ -0,0 +1,138 @@ +#include + +#include "Mapping/BoxDecomposition.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Predicates/Predicates.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" + +namespace tket { +SCENARIO("Decompose boxes") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + Eigen::Matrix4cd m; + m << 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0; + Unitary2qBox ubox(m); + + GIVEN("A box") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + + circ.add_box(ubox, {0, 2}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + std::shared_ptr mf = + std::make_shared(circ); + BoxDecomposition bd(shared_arc, mf); + bd.solve(); + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + REQUIRE(!c.get_op_ptr()->get_desc().is_box()); + } + } + + GIVEN("A conditional box") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + Conditional cond(std::make_shared(ubox), 1, 1); + circ.add_op( + std::make_shared(cond), {Bit(0), Qubit(0), Qubit(1)}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + BoxDecomposition bd(shared_arc, mf); + bd.solve(); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + Op_ptr op = c.get_op_ptr(); + REQUIRE( + !(op->get_desc().is_box() || (op->get_type() == OpType::Conditional && + static_cast(*op) + .get_op() + ->get_desc() + .is_box()))); + } + } + + GIVEN("Test BoxDecompositionRoutingMethod") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + circ.add_box(ubox, {0, 3}); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_box(ubox, {1, 3}); + circ.add_box(ubox, {0, 1}); + circ.add_op(OpType::X, {qubits[1]}); + circ.add_op(OpType::Measure, {0, 0}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + MappingManager mm(shared_arc); + std::vector vrm = { + + std::make_shared(10), + std::make_shared()}; + bool res = mm.route_circuit(circ, vrm); + REQUIRE(res); + PredicatePtr routed_correctly = + std::make_shared(architecture); + REQUIRE(routed_correctly->verify(circ)); + std::vector commands = 
mf->circuit_.get_commands(); + for (Command c : commands) { + REQUIRE(!c.get_op_ptr()->get_desc().is_box()); + } + } +} + +SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { + GIVEN("BoxDecompositionRoutingMethod") { + nlohmann::json j_rm; + j_rm["name"] = "BoxDecompositionRoutingMethod"; + BoxDecompositionRoutingMethod rm_loaded = + BoxDecompositionRoutingMethod::deserialize(j_rm); + nlohmann::json j_rm_serialised = rm_loaded.serialize(); + REQUIRE(j_rm == j_rm_serialised); + } + + GIVEN("BoxDecompositionRoutingMethod vector") { + nlohmann::json j_rms = { + {{"name", "BoxDecompositionRoutingMethod"}}, + { + {"name", "LexiRouteRoutingMethod"}, + {"depth", 3}, + }}; + std::vector rms = + j_rms.get>(); + nlohmann::json j_rms_serialised = rms; + REQUIRE(j_rms == j_rms_serialised); + } +} + +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp index bb33f0f095..9a82006a07 100644 --- a/tket/tests/test_MappingFrontier.cpp +++ b/tket/tests/test_MappingFrontier.cpp @@ -86,6 +86,36 @@ SCENARIO("Test MappingFrontier initialisation, advance_frontier_boundary.") { REQUIRE(mf.circuit_.source(e3) == v9); REQUIRE(mf.circuit_.target(e3) == v3); } + + GIVEN("A circuit with measurements and classically controlled operations") { + Circuit circ(3, 1); + std::vector qubits = circ.all_qubits(); + // All gates are physically permitted + Vertex v0 = circ.add_op(OpType::Measure, {0, 0}); + Vertex v1 = + circ.add_conditional_gate(OpType::Rx, {0.6}, {0}, {0}, 1); + Vertex v2 = + circ.add_conditional_gate(OpType::Rz, {0.6}, {1}, {0}, 1); + Vertex v3 = circ.add_op(OpType::X, {2}); + std::vector nodes = {Node(0), Node(1), Node(2)}; + + Architecture arc({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + mf.advance_frontier_boundary(shared_arc); + VertPort vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + VertPort vp1 = mf.quantum_boundary->get().find(nodes[1])->second; + VertPort vp2 = mf.quantum_boundary->get().find(nodes[2])->second; + Op_ptr op = circ.get_Op_ptr_from_Vertex(vp0.first); + Op_ptr op2 = circ.get_Op_ptr_from_Vertex(vp1.first); + Op_ptr op3 = circ.get_Op_ptr_from_Vertex(vp2.first); + REQUIRE(vp0.first == v1); + REQUIRE(vp1.first == v2); + REQUIRE(vp2.first == v3); + } } SCENARIO("Test MappingFrontier get_default_to_quantum_boundary_unit_map") { diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 12117c2955..2e8e4c099a 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -44,7 +44,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -85,7 +85,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -131,7 +131,7 @@ SCENARIO("Reorder circuits") { for (auto arg : 
commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -178,7 +178,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -220,8 +220,10 @@ SCENARIO("Reorder circuits with limited search space") { // Check only the first valid CZ get commuted to the front std::vector commands = circ.get_commands(); REQUIRE(shared_arc->valid_operation( + commands[0].get_op_ptr(), {Node(commands[0].get_args()[0]), Node(commands[0].get_args()[1])})); REQUIRE(!shared_arc->valid_operation( + commands[0].get_op_ptr(), {Node(commands[1].get_args()[0]), Node(commands[1].get_args()[1])})); const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -273,7 +275,7 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -297,13 +299,13 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands2[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands2[i].get_op_ptr(), nodes)); } std::vector nodes; for (auto arg : commands2[4].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(!shared_arc->valid_operation(nodes)); + REQUIRE(!shared_arc->valid_operation(commands2[4].get_op_ptr(), nodes)); const auto u2 = tket_sim::get_unitary(circ2); REQUIRE(tket_sim::compare_statevectors_or_unitaries( u2, u1, tket_sim::MatrixEquivalence::EQUAL)); @@ -358,7 +360,7 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { } } -SCENARIO("Test JSON serialisation") { +SCENARIO("Test JSON serialisation for MultiGateReorderRoutingMethod") { GIVEN("MultiGateReorderRoutingMethod") { nlohmann::json j_rm; j_rm["name"] = "MultiGateReorderRoutingMethod"; diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index d68cd25df6..8bdd52170b 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -639,10 +639,11 @@ SCENARIO("Test compiler pass serializations") { nlohmann::json j_loaded = loaded; REQUIRE(j_pp == j_loaded); } - GIVEN("Routing with MultiGateReorderRoutingMethod") { + GIVEN("Routing with multiple routing methods") { RoutingMethodPtr mrmp = std::make_shared(60, 80); - std::vector mrcon = {mrmp, rmp}; + RoutingMethodPtr brmp = std::make_shared(); + std::vector mrcon = {mrmp, rmp, brmp}; Circuit circ = CircuitsForTesting::get().uccsd; CompilationUnit cu{circ}; PassPtr placement = gen_placement_pass(place); diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 4fcdd0481e..1e40106386 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -95,6 +95,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_LexicographicalComparison.cpp ${TKET_TESTS_DIR}/test_LexiRoute.cpp ${TKET_TESTS_DIR}/test_MultiGateReorder.cpp + 
${TKET_TESTS_DIR}/test_BoxDecompRoutingMethod.cpp ${TKET_TESTS_DIR}/test_DeviceCharacterisation.cpp ${TKET_TESTS_DIR}/test_Clifford.cpp ${TKET_TESTS_DIR}/test_MeasurementSetup.cpp From 069593504d3aefbf65ca228c37f5e8ad945b01c0 Mon Sep 17 00:00:00 2001 From: cqc-melf <70640934+cqc-melf@users.noreply.github.com> Date: Thu, 10 Feb 2022 17:22:13 +0100 Subject: [PATCH 055/146] update ci check (#210) * update ci check * Update build_and_test.yml * Update build_and_test.yml --- .github/workflows/build_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index d828de914c..1e6ccc46b4 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -5,7 +5,7 @@ on: branches: - main - develop - - feature/routing-v3 + - feature/RV3.1 push: branches: - develop From bac8cc2eb27f7f1a280ff3c499662a962c1c3965 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 11 Feb 2022 15:23:37 +0000 Subject: [PATCH 056/146] Update build_and_test.yml --- .github/workflows/build_and_test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index d828de914c..dbe4cacdf7 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -5,7 +5,6 @@ on: branches: - main - develop - - feature/routing-v3 push: branches: - develop From 2d2d25d295b1e107a87ee5fdbfff9b5214133f38 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 11 Feb 2022 15:29:07 +0000 Subject: [PATCH 057/146] Update changelog.rst --- pytket/docs/changelog.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pytket/docs/changelog.rst b/pytket/docs/changelog.rst index 9cbffbb751..a9ec92ac56 100644 --- a/pytket/docs/changelog.rst +++ b/pytket/docs/changelog.rst @@ -10,6 +10,13 @@ API changes: ``map`` property instead.) * The deprecated ``Backend.compile_circuit`` method is removed. (Use ``get_compiled_circuit`` instead.) +* The ``routing`` module is removed. +* ``Placement``, ``LinePlacement``, ``GraphPlacement`` and ``NoiseAwarePlacement`` + are now imported from the ``placement`` module. +* ``Architecture``, ``SquareGrid``, ``RingArch`` and ``FullyConnected`` are now + imported from the ``architecture`` module. +* Methods for mapping logical to physical circuits are now available in the + ``mapping`` module, with a new API and new functionality. 
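A rough sketch of how the pieces added in this series compose (the wrapper function below and its name are illustrative scaffolding rather than code from the patch; the headers, classes and the depth value 100 are those used by gen_default_mapping_pass and the MappingManager tests above):

#include "Circuit/Circuit.hpp"
#include "Mapping/LexiLabelling.hpp"
#include "Mapping/LexiRoute.hpp"
#include "Mapping/MappingManager.hpp"

using namespace tket;

// Label any unplaced qubits, then insert SWAP/BRIDGE gates lexicographically,
// mirroring gen_default_mapping_pass and tket/tests/test_LexiRoute.cpp.
bool route_with_lexi_pipeline(Circuit& circ, const ArchitecturePtr& shared_arc) {
  MappingManager mm(shared_arc);
  std::vector<RoutingMethodPtr> methods = {
      std::make_shared<LexiLabellingMethod>(),
      std::make_shared<LexiRouteRoutingMethod>(100)};
  // The tests check this returns true and that the routed circuit then
  // satisfies the architecture's connectivity constraints.
  return mm.route_circuit(circ, methods);
}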
0.19.0 (February 2022) ---------------------- From 20404bff111e62fe0b63fb0782f62a281393a73f Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 11 Feb 2022 15:30:06 +0000 Subject: [PATCH 058/146] Update copyright dates 2021 -> 2022 --- pytket/binders/architecture.cpp | 2 +- pytket/pytket/architecture/__init__.py | 2 +- pytket/pytket/mapping/__init__.py | 2 +- pytket/pytket/placement/__init__.py | 2 +- tket/src/TokenSwapping/ArchitectureMapping.cpp | 2 +- tket/src/TokenSwapping/BestFullTsa.cpp | 2 +- tket/src/TokenSwapping/CyclesCandidateManager.cpp | 2 +- tket/src/TokenSwapping/CyclesGrowthManager.cpp | 2 +- tket/src/TokenSwapping/CyclesPartialTsa.cpp | 2 +- tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp | 2 +- tket/src/TokenSwapping/DistancesFromArchitecture.cpp | 2 +- tket/src/TokenSwapping/DistancesInterface.cpp | 2 +- tket/src/TokenSwapping/DynamicTokenTracker.cpp | 2 +- tket/src/TokenSwapping/HybridTsa00.cpp | 2 +- tket/src/TokenSwapping/NeighboursFromArchitecture.cpp | 2 +- tket/src/TokenSwapping/NeighboursInterface.cpp | 2 +- tket/src/TokenSwapping/PartialTsaInterface.cpp | 2 +- tket/src/TokenSwapping/PathFinderInterface.cpp | 2 +- tket/src/TokenSwapping/RNG.cpp | 2 +- tket/src/TokenSwapping/RiverFlowPathFinder.cpp | 2 +- tket/src/TokenSwapping/SwapListOptimiser.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp | 2 +- tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp | 2 +- tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp | 2 +- tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp | 2 +- tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp | 2 +- tket/src/TokenSwapping/TableLookup/SwapConversion.cpp | 2 +- tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp | 2 +- tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp | 2 +- tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp | 2 +- tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp | 2 +- tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp | 2 +- tket/src/TokenSwapping/TrivialTSA.cpp | 2 +- tket/src/TokenSwapping/VectorListHybridSkeleton.cpp | 2 +- .../TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp | 2 +- .../include/TokenSwapping/CanonicalRelabelling.hpp | 2 +- .../include/TokenSwapping/CyclesCandidateManager.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp | 2 +- .../include/TokenSwapping/CyclicShiftCostEstimate.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp | 2 +- .../include/TokenSwapping/DistancesFromArchitecture.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/DistancesInterface.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp | 2 +- .../include/TokenSwapping/FilteredSwapSequences.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp | 2 +- .../include/TokenSwapping/NeighboursFromArchitecture.hpp | 
2 +- .../TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp | 2 +- .../include/TokenSwapping/PartialMappingLookup.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp | 2 +- .../include/TokenSwapping/SwapListSegmentOptimiser.hpp | 2 +- .../include/TokenSwapping/SwapListTableOptimiser.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp | 2 +- .../include/TokenSwapping/VectorListHybridSkeleton.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp | 2 +- .../include/TokenSwapping/VertexMappingFunctions.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp | 2 +- .../include/TokenSwapping/main_entry_functions.hpp | 2 +- tket/src/TokenSwapping/main_entry_functions.cpp | 2 +- tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp | 2 +- tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp | 2 +- tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp | 2 +- tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp | 2 +- tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp | 2 +- tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp | 2 +- tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp | 2 +- tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp | 2 +- .../TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp | 2 +- .../TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp | 2 +- .../TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp | 2 +- .../tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp | 2 +- .../TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp | 2 +- .../TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp | 2 +- tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp | 2 +- .../TestUtils/ArchitectureEdgesReimplementation.cpp | 2 +- .../TestUtils/ArchitectureEdgesReimplementation.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp | 2 +- tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp | 2 +- tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp | 2 +- tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp | 2 +- tket/tests/TokenSwapping/test_FullTsa.cpp | 2 +- tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp | 2 +- tket/tests/TokenSwapping/test_SwapList.cpp | 2 
+- tket/tests/TokenSwapping/test_SwapListOptimiser.cpp | 2 +- tket/tests/TokenSwapping/test_VariousPartialTsa.cpp | 2 +- tket/tests/TokenSwapping/test_VectorListHybrid.cpp | 2 +- tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp | 2 +- tket/tests/TokenSwapping/test_main_entry_functions.cpp | 2 +- 116 files changed, 116 insertions(+), 116 deletions(-) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp index 8a2ff121c5..95f63acd58 100644 --- a/pytket/binders/architecture.cpp +++ b/pytket/binders/architecture.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/pytket/pytket/architecture/__init__.py b/pytket/pytket/architecture/__init__.py index 41556524e2..a1b9b99ccf 100644 --- a/pytket/pytket/architecture/__init__.py +++ b/pytket/pytket/architecture/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021 Cambridge Quantum Computing +# Copyright 2019-2022 Cambridge Quantum Computing # # You may not use this file except in compliance with the Licence. # You may obtain a copy of the Licence in the LICENCE file accompanying diff --git a/pytket/pytket/mapping/__init__.py b/pytket/pytket/mapping/__init__.py index faddc73127..15965513fa 100644 --- a/pytket/pytket/mapping/__init__.py +++ b/pytket/pytket/mapping/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021 Cambridge Quantum Computing +# Copyright 2019-2022 Cambridge Quantum Computing # # You may not use this file except in compliance with the Licence. # You may obtain a copy of the Licence in the LICENCE file accompanying diff --git a/pytket/pytket/placement/__init__.py b/pytket/pytket/placement/__init__.py index f2c2a3cce4..2c18b8d1b9 100644 --- a/pytket/pytket/placement/__init__.py +++ b/pytket/pytket/placement/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021 Cambridge Quantum Computing +# Copyright 2019-2022 Cambridge Quantum Computing # # You may not use this file except in compliance with the Licence. # You may obtain a copy of the Licence in the LICENCE file accompanying diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 7f6e08dbfc..8011d3917a 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp index 30c30bbaaa..ba78e080ee 100644 --- a/tket/src/TokenSwapping/BestFullTsa.cpp +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp index 13a5698a64..3f7760c503 100644 --- a/tket/src/TokenSwapping/CyclesCandidateManager.cpp +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp index 894494e5d0..2797924868 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.cpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.cpp b/tket/src/TokenSwapping/CyclesPartialTsa.cpp index 046488f18d..972ec0c0e8 100644 --- a/tket/src/TokenSwapping/CyclesPartialTsa.cpp +++ b/tket/src/TokenSwapping/CyclesPartialTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp index f5fe4a0050..88ff1228e6 100644 --- a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index e8c629c87c..3ae327ceda 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/DistancesInterface.cpp b/tket/src/TokenSwapping/DistancesInterface.cpp index 35363c7505..a55d6b1f3f 100644 --- a/tket/src/TokenSwapping/DistancesInterface.cpp +++ b/tket/src/TokenSwapping/DistancesInterface.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.cpp b/tket/src/TokenSwapping/DynamicTokenTracker.cpp index fe0e1dc234..18c65ba2ff 100644 --- a/tket/src/TokenSwapping/DynamicTokenTracker.cpp +++ b/tket/src/TokenSwapping/DynamicTokenTracker.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/HybridTsa00.cpp b/tket/src/TokenSwapping/HybridTsa00.cpp index 9f5df1fd19..6dc5ce7ebb 100644 --- a/tket/src/TokenSwapping/HybridTsa00.cpp +++ b/tket/src/TokenSwapping/HybridTsa00.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index d93cfc8b13..57bc21724a 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/NeighboursInterface.cpp b/tket/src/TokenSwapping/NeighboursInterface.cpp index 7a5773e33c..805ffa02f7 100644 --- a/tket/src/TokenSwapping/NeighboursInterface.cpp +++ b/tket/src/TokenSwapping/NeighboursInterface.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/PartialTsaInterface.cpp b/tket/src/TokenSwapping/PartialTsaInterface.cpp index afac5357dd..f80248db72 100644 --- a/tket/src/TokenSwapping/PartialTsaInterface.cpp +++ b/tket/src/TokenSwapping/PartialTsaInterface.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/PathFinderInterface.cpp b/tket/src/TokenSwapping/PathFinderInterface.cpp index d8d03169e6..0f6f2993f3 100644 --- a/tket/src/TokenSwapping/PathFinderInterface.cpp +++ b/tket/src/TokenSwapping/PathFinderInterface.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/RNG.cpp b/tket/src/TokenSwapping/RNG.cpp index ab2c48e70d..daa3c05e4a 100644 --- a/tket/src/TokenSwapping/RNG.cpp +++ b/tket/src/TokenSwapping/RNG.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index e80ab7516a..aa98bcff81 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp index 42773437ce..eddf3ae63f 100644 --- a/tket/src/TokenSwapping/SwapListOptimiser.cpp +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp index ce4943e493..82e1cd77e4 100644 --- a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp index 7463e35b09..cd2c9c7cf1 100644 --- a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp index dd75e1c98f..97041c0131 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp index 282b2efd36..870e51ed75 100644 --- a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index 8b812d53eb..c5445774cd 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp index 51c1c65fcc..b3ea011b44 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp index 1a58535764..da70765856 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp index 9b75744ae6..19a0a14c38 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp index 39fb0ccbdc..133758284d 100644 --- a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp index cc7c6a5dcb..d5a0239f4a 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp index 0c8d8ad3ad..1adfc1066b 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index 3a36792882..3c72369ce4 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp index 6478942473..e3205feef4 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp index 21a2597558..3687a546c1 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp index 7060995b1b..ba5d662c9e 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp index 24cd00adb2..201ad7e539 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp index 7113ae236b..e4ed130b7e 100644 --- a/tket/src/TokenSwapping/TrivialTSA.cpp +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index b251f21244..b0a736b7b3 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp index dfe579bff4..44f006a555 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp index f476dbc0e8..a733f00958 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp index d509b5bcaa..98b37bf9b7 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp index 36cb7b84fd..c35bf4ea77 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp index 768c6b902e..68549cde8a 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp index c393270694..7de0597a25 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp index 647839e9ac..fa3111afd5 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp index a2fa9f1625..d234c5e930 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp index 43612bfeb5..0008ba7a22 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp index 718b81d1e9..7f2dc0834d 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp index fa3488ba18..8f7f1a1063 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp index bc6e7ab8e1..c4be18d7c3 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp index dc3b1b8e72..ede9d94fca 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp b/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp index 72cea46c39..59088ed3b6 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp index 5f36a4f7d8..e4abcaae33 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp b/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp index 3c5d86d9b4..e5db31d851 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp index e32b531ebc..5a32f3cb5d 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp index 371552a37d..fe7be4387d 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp index deaa7dda72..ce2c8b5911 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp @@ -1,5 +1,5 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp index 2479a1907d..3083056ac4 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp index a6548f9fd4..9692e9c2a5 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp index ac6fc7c73a..99f93d0e2b 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp index 23546388df..6060b1d4a0 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp index 58868ef21c..40cdde8425 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp index b8fe8063d5..ac74509b68 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp index c3ce30d5ff..6df1b15cd2 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp index 2d180e06ed..87b5c72d31 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp @@ -1,5 +1,5 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp index ac815c2f7c..7a7532dd9a 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp @@ -1,5 +1,5 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp index 6ff7237f9a..0068bbfe18 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp b/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp index 7260e5c9f0..797a0b689a 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp index 6595f46c6b..d043249950 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp index 2bf9649ca7..e8b3b64fde 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp index b8dd971001..d1e8677b42 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp index 752040e3b5..de78dae7e1 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp index 441678c4d4..a8742a2670 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp index 14a1c7d4c6..c5a776bf19 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp index 7390632a06..12507f2463 100644 --- a/tket/src/TokenSwapping/main_entry_functions.cpp +++ b/tket/src/TokenSwapping/main_entry_functions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp index e1f06aee30..4ed7727aa1 100644 --- a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp index 9681980374..6b9222a971 100644 --- a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp index 7a1a339b94..87ef4a4953 100644 --- a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp index a4846faad8..ab28fca9f9 100644 --- a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp index ca4eea1c4f..c3f27050b2 100644 --- a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp index 504360f9f0..7cedbb075d 100644 --- a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp index 5c139c697e..c3a9692121 100644 --- a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp index 3b452ae42a..abd4532ce8 100644 --- a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp index 74e4a27ade..67e64f3bcb 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp index 2adcf5f6d0..1ba570a2d1 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp index da96d908c1..6e0fa8f2da 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp index 9fbebf93cc..85c00e5548 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp index c169f84058..3af1da4756 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp index e4818c14d2..11242ea2fc 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp index 6bc7ed97fa..d73070888d 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp index 87e33d2595..9b94b9a7a8 100644 --- a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp index b6ddcd8f39..b730ad7bd2 100644 --- a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp index ab7c31886b..23727c7107 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp index 65c142b932..6afa674f0b 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp index a489add6e7..569243fd02 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp index 7a10b40fa2..fa7d139570 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp index 7659964336..20da142a46 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index 730ba812b8..bb93aa43e2 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp index 722a6825a1..56bb7a805f 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index fe4c3b9857..979b19de0e 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp index f51cea6763..2f6da5ceeb 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp index a8043b5a0d..2e81c5c82b 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp index 4cff62e020..7f45186ba7 100644 --- a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp index 182790a92b..efaaedd429 100644 --- a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp index 1980c5b9f9..431875618c 100644 --- a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp +++ b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp index 21a074d24d..6fedd4845d 100644 --- a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp index 648d220594..cf8d06d9f7 100644 --- a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp +++ b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_FullTsa.cpp b/tket/tests/TokenSwapping/test_FullTsa.cpp index b9aae17bcb..1715729e77 100644 --- a/tket/tests/TokenSwapping/test_FullTsa.cpp +++ b/tket/tests/TokenSwapping/test_FullTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 239e14d9be..446fc39e74 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_SwapList.cpp b/tket/tests/TokenSwapping/test_SwapList.cpp index 4769d470b5..184f24ca32 100644 --- a/tket/tests/TokenSwapping/test_SwapList.cpp +++ b/tket/tests/TokenSwapping/test_SwapList.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp index 85242d0f36..d2b14880b4 100644 --- a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp index aba973ec54..d01d94cf7f 100644 --- a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp index 238b5e38a6..9ae0d81a4f 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp index d959935366..57e09601f9 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_main_entry_functions.cpp b/tket/tests/TokenSwapping/test_main_entry_functions.cpp index a0a110a0f6..59d31a27de 100644 --- a/tket/tests/TokenSwapping/test_main_entry_functions.cpp +++ b/tket/tests/TokenSwapping/test_main_entry_functions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. From 3cffd1f5946bc66607a2c7fd51498ad4c6cc59a2 Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Fri, 11 Feb 2022 15:32:09 +0000 Subject: [PATCH 059/146] Update pytket/binders/mapping.cpp Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> --- pytket/binders/mapping.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index a0fd34ec01..edaedcfaa7 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -34,7 +34,7 @@ PYBIND11_MODULE(mapping, m) { RoutingMethodCircuit, std::shared_ptr, RoutingMethod>( m, "RoutingMethodCircuit", - "The RoutingMethod class captures a method for partially mapping logical" + "The RoutingMethod class captures a method for partially mapping logical " "subcircuits to physical operations as permitted by some architecture. 
" "Ranked RoutingMethod objects are used by the MappingManager to route " "whole circuits.") From 7c092f2b99f41da88aa89d3c35b6432bae6784ef Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Fri, 11 Feb 2022 15:32:20 +0000 Subject: [PATCH 060/146] Update pytket/binders/mapping.cpp Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> --- pytket/binders/mapping.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index edaedcfaa7..3e7aae3520 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -49,7 +49,7 @@ PYBIND11_MODULE(mapping, m) { "that given a Circuit and Architecture object, returns a tuple " "containing a new modified circuit, the initial logical to physical " "qubit mapping of the modified circuit and the permutation of " - "'logical to physical qubit mapping given operations in the " + "logical to physical qubit mapping given operations in the " "modified circuit\n:param check_subcircuit: A function declaration " "that given a Circuit and Architecture object, returns a bool " "stating whether the given method can modify the " From 9a9007d8a499229c4c52701707c7726b2e024126 Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Fri, 11 Feb 2022 15:34:31 +0000 Subject: [PATCH 061/146] Update pytket/binders/mapping.cpp Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> --- pytket/binders/mapping.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index 3e7aae3520..78ea5a4435 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -75,8 +75,8 @@ PYBIND11_MODULE(mapping, m) { py::class_( m, "MappingManager", - "Defined by a pytket Architecture object, maps Circuit logical Qubits " - "to Physically permitted Architecture qubits. Mapping is completed by " + "Defined by a pytket Architecture object, maps Circuit logical qubits " + "to physically permitted Architecture qubits. Mapping is completed by " "sequential routing (full or partial) of subcircuits. 
Custom method for " "routing (full or partial) of subcircuits can be defined in python " "layer.") From 849c7c5c92cc803eda752d94d043db5805002d45 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 11 Feb 2022 16:31:11 +0000 Subject: [PATCH 062/146] Corrections for PR --- pytket/binders/mapping.cpp | 14 +++++------- pytket/binders/passes.cpp | 9 ++++---- pytket/pytket/architecture/__init__.py | 19 +++++++++------- pytket/pytket/mapping/__init__.py | 22 ++++++++++++------- pytket/pytket/placement/__init__.py | 21 +++++++++++------- pytket/tests/mapping_test.py | 11 +++------- .../include/Architecture/Architecture.hpp | 4 ++++ tket/src/CMakeLists.txt | 1 - tket/src/Characterisation/CMakeLists.txt | 1 - tket/src/Converters/CMakeLists.txt | 3 +-- tket/src/Diagonalisation/CMakeLists.txt | 3 +-- tket/src/Mapping/CMakeLists.txt | 7 +++++- .../src/Mapping/include/Mapping/LexiRoute.hpp | 2 +- tket/src/MeasurementSetup/CMakeLists.txt | 2 -- tket/src/PauliGraph/CMakeLists.txt | 1 - tket/src/Placement/CMakeLists.txt | 2 +- tket/src/Program/CMakeLists.txt | 2 -- tket/src/Simulation/CMakeLists.txt | 2 -- tket/src/Transformations/CMakeLists.txt | 2 -- tket/tests/test_CompilerPass.cpp | 1 + 20 files changed, 66 insertions(+), 63 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index a0fd34ec01..a20eaf193f 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -68,29 +68,27 @@ PYBIND11_MODULE(mapping, m) { .def( py::init(), "LexiRoute constructor.\n\n:param lookahead: Maximum depth of " - "lookahead " - "employed when picking SWAP for purpose of logical to physical " - "mapping.", + "lookahead employed when picking SWAP for purpose of logical to " + "physical mapping.", py::arg("lookahead") = 10); py::class_( m, "MappingManager", "Defined by a pytket Architecture object, maps Circuit logical Qubits " "to Physically permitted Architecture qubits. Mapping is completed by " - "sequential routing (full or partial) of subcircuits. Custom method for " - "routing (full or partial) of subcircuits can be defined in python " - "layer.") + "sequential routing (full or partial) of subcircuits. A custom method for " + "routing (full or partial) of subcircuits can be defined in Python.") .def( py::init(), "MappingManager constructor.\n\n:param architecture: pytket " - "Architecure object MappingManager object defined by.", + "Architecture object.", py::arg("architecture")) .def( "route_circuit", &MappingManager::route_circuit, "Maps from given logical circuit to physical circuit. Modification " "defined by route_subcircuit, but typically this proceeds by " "insertion of SWAP gates that permute logical qubits on physical " - "qubits. \n\n:param circuit: pytket circuit to be mapped" + "qubits.\n\n:param circuit: pytket circuit to be mapped" "\n:param routing_methods: Ranked methods to use for routing " "subcircuits. 
In given order, each method is sequentially checked " "for viability, with the first viable method being used.", diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index c384d9135e..dd1986ec65 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -33,7 +33,7 @@ namespace tket { static PassPtr gen_cx_mapping_pass_kwargs( const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { - RoutingMethodPtr method = std::make_shared(100); + RoutingMethodPtr method = std::make_shared(); std::vector config = {method}; if (kwargs.contains("config")) { config = py::cast>(kwargs["config"]); @@ -50,7 +50,7 @@ static PassPtr gen_cx_mapping_pass_kwargs( } static PassPtr gen_default_routing_pass(const Architecture &arc) { - RoutingMethodPtr method = std::make_shared(100); + RoutingMethodPtr method = std::make_shared(); std::vector config = {method}; return gen_routing_pass(arc, config); } @@ -510,9 +510,8 @@ PYBIND11_MODULE(passes, m) { "of an :py:class:`Architecture`. Edge direction is ignored." "\n\n:param arc: The architecture to use for connectivity information. " "\n:param placer: The Placement used for relabelling." - "\n:param config: Parameters for routing, a " - " list of RoutingMethod, each method is checked" - " and run if applicable in turn." + "\n:param config: Parameters for routing, a list of RoutingMethod, each " + "method is checked and run if applicable in turn." "\n:return: a pass to perform the remapping", py::arg("arc"), py::arg("placer"), py::arg("config")); diff --git a/pytket/pytket/architecture/__init__.py b/pytket/pytket/architecture/__init__.py index a1b9b99ccf..6ff2f2b9d7 100644 --- a/pytket/pytket/architecture/__init__.py +++ b/pytket/pytket/architecture/__init__.py @@ -1,14 +1,17 @@ # Copyright 2019-2022 Cambridge Quantum Computing # -# You may not use this file except in compliance with the Licence. -# You may obtain a copy of the Licence in the LICENCE file accompanying -# these documents or at: +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# https://cqcl.github.io/pytket/build/html/licence.html +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""The architecture module provides an API to interact with the - tket ::py:class:'Architecture' class, which for some set of identified physical qubits, - defines which can run two-qubit gates between them. This module is provided in binary - form during the PyPI installation.""" +"""The `architecture` module provides an API to interact with the ::py:class:`Architecture` class.""" from pytket._tket.architecture import * # type: ignore diff --git a/pytket/pytket/mapping/__init__.py b/pytket/pytket/mapping/__init__.py index 15965513fa..a413b474b3 100644 --- a/pytket/pytket/mapping/__init__.py +++ b/pytket/pytket/mapping/__init__.py @@ -1,14 +1,20 @@ # Copyright 2019-2022 Cambridge Quantum Computing # -# You may not use this file except in compliance with the Licence. 
-# You may obtain a copy of the Licence in the LICENCE file accompanying -# these documents or at: +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# https://cqcl.github.io/pytket/build/html/licence.html -"""The mapping module provides an API to interact with the - tket :py:class:`MappingManager` suite, with methods for +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The `mapping` module provides an API to interact with the + :py:class:`MappingManager` class, with methods for mapping logical circuits to physical circuits and for - defining custom routing solutions. This module is provided - in binary form during the PyPI installation.""" + defining custom routing solutions.""" from pytket._tket.mapping import * # type: ignore diff --git a/pytket/pytket/placement/__init__.py b/pytket/pytket/placement/__init__.py index 2c18b8d1b9..4b7ae79613 100644 --- a/pytket/pytket/placement/__init__.py +++ b/pytket/pytket/placement/__init__.py @@ -1,15 +1,20 @@ # Copyright 2019-2022 Cambridge Quantum Computing # -# You may not use this file except in compliance with the Licence. -# You may obtain a copy of the Licence in the LICENCE file accompanying -# these documents or at: +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# https://cqcl.github.io/pytket/build/html/licence.html +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""The placement module provides an API to interact with the many - tket ::py:class:'Placement' options, providing methods for relabelling +"""The `placement` module provides an API to interact with the many + :py:class:`Placement` options, providing methods for relabelling logical circuit qubit identifiers to physical architecture node identifiers, - for the purpose of compilation. 
This module is provided in binary form during the - PyPI installation.""" + for the purpose of compilation.""" from pytket._tket.placement import * # type: ignore diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index 5c65f37616..3aea34b857 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -35,12 +35,10 @@ def route_subcircuit_func( relabelling_map = dict() for qb in circuit.qubits: - for n in unused_nodes: - if n == qb: - unused_nodes.remove(n) + unused_nodes.remove(qb) for qb in circuit.qubits: - if qb not in set(architecture.nodes): + if qb not in architecture.nodes: relabelling_map[qb] = unused_nodes.pop() else: # this is so later architecture.get_distance works @@ -65,9 +63,6 @@ def route_subcircuit_func( replacement_circuit.add_gate(com.op.type, rp_qubits) if len(com.qubits) == 2: if swaps_added < max_swaps: - # get node references for some stupid reason... - # theres some stupid casting issue - # just passing qubits didnt work.. whatever for n in architecture.nodes: if n == rp_qubits[0]: n0 = n @@ -88,7 +83,7 @@ def route_subcircuit_func( rp_qubits = [ permutation_map[relabelling_map[q]] for q in com.qubits ] - swaps_added = swaps_added + 1 + swaps_added += 1 break replacement_circuit.add_gate(com.op.type, rp_qubits) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index b7c3d975d0..e2a08ac456 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -102,6 +102,10 @@ class Architecture : public ArchitectureBase> { */ node_set_t get_articulation_points(const Architecture &subarc) const; + /** + * Returns whether a gate defined by its OpType and the Qubit it acts on + * is possible on the architecture. 
+ */ bool valid_operation( /*const OpType& optype, */ const std::vector &uids) const; diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index dc58334a34..e101b6024f 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -75,7 +75,6 @@ list(APPEND TKET_COMPS Program Characterisation Converters - TokenSwapping Placement ArchAwareSynth Mapping diff --git a/tket/src/Characterisation/CMakeLists.txt b/tket/src/Characterisation/CMakeLists.txt index 79a8ea1280..09017b7775 100644 --- a/tket/src/Characterisation/CMakeLists.txt +++ b/tket/src/Characterisation/CMakeLists.txt @@ -31,7 +31,6 @@ list(APPEND DEPS_${COMP} Ops OpType PauliGraph - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Converters/CMakeLists.txt b/tket/src/Converters/CMakeLists.txt index 5892649513..ed00fd6d66 100644 --- a/tket/src/Converters/CMakeLists.txt +++ b/tket/src/Converters/CMakeLists.txt @@ -31,11 +31,10 @@ list(APPEND DEPS_${COMP} Circuit Clifford Diagonalisation - Gate + Gate Ops OpType PauliGraph - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Diagonalisation/CMakeLists.txt b/tket/src/Diagonalisation/CMakeLists.txt index ce77270497..4e289e4723 100644 --- a/tket/src/Diagonalisation/CMakeLists.txt +++ b/tket/src/Diagonalisation/CMakeLists.txt @@ -27,11 +27,10 @@ list(APPEND DEPS_${COMP} Circuit Clifford Gate - Graphs + Graphs Ops OpType PauliGraph - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index 534adc2ea8..b7c6c74769 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -51,4 +51,9 @@ target_include_directories(tket-${COMP} ${TKET_${COMP}_INCLUDE_DIR} ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) -target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS}) +target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS_SYMENGINE}) + +if (WIN32) + # For boost::uuid: + target_link_libraries(tket-${COMP} PRIVATE bcrypt) +endif() \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp index cbd2d7b4e2..caada4ba71 100644 --- a/tket/src/Mapping/include/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -167,7 +167,7 @@ class LexiRouteRoutingMethod : public RoutingMethod { * * @param _max_depth Number of layers of gates checked inr outed subcircuit. 
*/ - LexiRouteRoutingMethod(unsigned _max_depth = 10); + LexiRouteRoutingMethod(unsigned _max_depth = 100); /** * @return true if method can route subcircuit, false if not diff --git a/tket/src/MeasurementSetup/CMakeLists.txt b/tket/src/MeasurementSetup/CMakeLists.txt index b45b773176..f5cf5e8469 100644 --- a/tket/src/MeasurementSetup/CMakeLists.txt +++ b/tket/src/MeasurementSetup/CMakeLists.txt @@ -28,11 +28,9 @@ list(APPEND DEPS_${COMP} Converters Diagonalisation Gate - Mapping Ops OpType PauliGraph - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/PauliGraph/CMakeLists.txt b/tket/src/PauliGraph/CMakeLists.txt index af9054fdc6..6cb3392c1e 100644 --- a/tket/src/PauliGraph/CMakeLists.txt +++ b/tket/src/PauliGraph/CMakeLists.txt @@ -27,7 +27,6 @@ list(APPEND DEPS_${COMP} Gate Ops OpType - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Placement/CMakeLists.txt b/tket/src/Placement/CMakeLists.txt index 29666d4e89..9d9dfd9f58 100644 --- a/tket/src/Placement/CMakeLists.txt +++ b/tket/src/Placement/CMakeLists.txt @@ -29,7 +29,7 @@ list(APPEND DEPS_${COMP} Characterisation Circuit Gate - Graphs + Graphs Ops OpType TokenSwapping diff --git a/tket/src/Program/CMakeLists.txt b/tket/src/Program/CMakeLists.txt index 2e3663de70..3cf2fce67e 100644 --- a/tket/src/Program/CMakeLists.txt +++ b/tket/src/Program/CMakeLists.txt @@ -28,10 +28,8 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Circuit Gate - Mapping Ops OpType - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Simulation/CMakeLists.txt b/tket/src/Simulation/CMakeLists.txt index 7afd178442..ab28df4ea8 100644 --- a/tket/src/Simulation/CMakeLists.txt +++ b/tket/src/Simulation/CMakeLists.txt @@ -29,10 +29,8 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Circuit Gate - Mapping Ops OpType - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Transformations/CMakeLists.txt b/tket/src/Transformations/CMakeLists.txt index acb66eb75e..38584b3b37 100644 --- a/tket/src/Transformations/CMakeLists.txt +++ b/tket/src/Transformations/CMakeLists.txt @@ -45,11 +45,9 @@ list(APPEND DEPS_${COMP} Converters Gate Graphs - Mapping Ops OpType PauliGraph - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/tests/test_CompilerPass.cpp b/tket/tests/test_CompilerPass.cpp index 7342b4820a..403a2be219 100644 --- a/tket/tests/test_CompilerPass.cpp +++ b/tket/tests/test_CompilerPass.cpp @@ -1017,6 +1017,7 @@ SCENARIO("Commute measurements to the end of a circuit") { Command final_command = cu.get_circ_ref().get_commands()[7]; OpType type = final_command.get_op_ptr()->get_type(); REQUIRE(type == OpType::Measure); + std::cout << cu.get_circ_ref() << std::endl; REQUIRE(final_command.get_args().front() == Node(3)); } } From 8ed2f2b2fa8ae9340d51e671ff55f01ea89c4368 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 14 Feb 2022 09:38:10 +0000 Subject: [PATCH 063/146] Remove tokenswapping from dependencies --- tket/src/ArchAwareSynth/CMakeLists.txt | 1 - tket/src/Placement/CMakeLists.txt | 1 - 2 files changed, 2 deletions(-) diff --git a/tket/src/ArchAwareSynth/CMakeLists.txt b/tket/src/ArchAwareSynth/CMakeLists.txt index 95a85536fe..0556a9c0a0 100644 --- a/tket/src/ArchAwareSynth/CMakeLists.txt +++ b/tket/src/ArchAwareSynth/CMakeLists.txt @@ -35,7 +35,6 @@ list(APPEND DEPS_${COMP} Placement Ops OpType - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Placement/CMakeLists.txt b/tket/src/Placement/CMakeLists.txt index 9d9dfd9f58..fd67a50983 
100644 --- a/tket/src/Placement/CMakeLists.txt +++ b/tket/src/Placement/CMakeLists.txt @@ -32,7 +32,6 @@ list(APPEND DEPS_${COMP} Graphs Ops OpType - TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) From 86fb61e6f2922050a1ac438d1a8103ea6942a239 Mon Sep 17 00:00:00 2001 From: yao-cqc <75305462+yao-cqc@users.noreply.github.com> Date: Mon, 14 Feb 2022 10:12:29 +0000 Subject: [PATCH 064/146] Feature/decompose boxes in routing (#197) * Reject boxes in Architecture::valid_operation * Add `next_q_cut` method to a quantum cut Only consider quantum edges * Use `next_q_cut` in `advance_frontier_boundary` * Add BoxDecompositionRoutingMethod * Add tests * Reformat * Reject boxes in LexiRouteMethod::check_method * Update tests * Add JSON serialisation * Handle unused arguments * Refactor Circuit::decompose_boxes * fix naming Co-authored-by: sjdilkes --- tket/src/Architecture/Architecture.cpp | 7 +- tket/src/Architecture/CMakeLists.txt | 3 + .../include/Architecture/Architecture.hpp | 5 +- tket/src/Circuit/include/Circuit/Circuit.hpp | 12 ++ tket/src/Circuit/macro_circ_info.cpp | 38 +++++ tket/src/Circuit/macro_manipulation.cpp | 39 +++-- tket/src/Mapping/BoxDecomposition.cpp | 71 +++++++++ tket/src/Mapping/CMakeLists.txt | 1 + tket/src/Mapping/LexiRoute.cpp | 13 +- tket/src/Mapping/MappingFrontier.cpp | 16 +- tket/src/Mapping/MultiGateReorder.cpp | 3 +- tket/src/Mapping/RoutingMethodJson.cpp | 3 + .../include/Mapping/BoxDecomposition.hpp | 63 ++++++++ .../include/Mapping/MultiGateReorder.hpp | 4 +- .../include/Mapping/RoutingMethodJson.hpp | 1 + tket/tests/Circuit/test_Circ.cpp | 29 ++++ tket/tests/test_BoxDecompRoutingMethod.cpp | 137 ++++++++++++++++++ tket/tests/test_MappingFrontier.cpp | 30 ++++ tket/tests/test_MultiGateReorder.cpp | 18 ++- tket/tests/test_json.cpp | 5 +- tket/tests/tkettestsfiles.cmake | 1 + 21 files changed, 452 insertions(+), 47 deletions(-) create mode 100644 tket/src/Mapping/BoxDecomposition.cpp create mode 100644 tket/src/Mapping/include/Mapping/BoxDecomposition.hpp create mode 100644 tket/tests/test_BoxDecompRoutingMethod.cpp diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index 479f734f08..3c88bb5081 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -27,7 +27,12 @@ namespace tket { // basic implementation that works off same prior assumptions // TODO: Update this for more mature systems of multi-qubit gates bool Architecture::valid_operation( - /*const OpType& optype, */ const std::vector& uids) const { + const Op_ptr& op, const std::vector& uids) const { + if (op->get_desc().is_box() || + (op->get_type() == OpType::Conditional && + static_cast(*op).get_op()->get_desc().is_box())) + return false; + if (uids.size() == 1) { // TODO: for simple case here this should probably not pass if // node_exists[uids[0]] == FALSE, but should be fine for now? 
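For context on the Architecture::valid_operation change above, a minimal sketch of how the new two-argument overload behaves; this is an illustration only, not code from the patch series. The include path for Unitary2qBox, the helper function name, the node labels and the edge list are assumptions made for the example.

#include <memory>
#include "Architecture/Architecture.hpp"  // Architecture, Node (also pulls in Eigen via Utils/EigenConfig.hpp)
#include "Circuit/Boxes.hpp"              // Unitary2qBox (assumed header location)

static bool box_on_edge_is_valid() {
  using namespace tket;
  // A three-node line architecture: 0 -- 1 -- 2 (labels are illustrative).
  Architecture arc({{Node(0), Node(1)}, {Node(1), Node(2)}});
  // A two-qubit box built from an explicit unitary.
  Eigen::Matrix4cd m;
  m << 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0;
  Op_ptr box_op = std::make_shared<Unitary2qBox>(m);
  // Boxes (and conditionals wrapping boxes) are now rejected even on an
  // architecture edge, so the frontier stops in front of them.
  return arc.valid_operation(box_op, {Node(0), Node(1)});  // expected: false
}

Primitive one- and two-qubit gates on connected nodes still pass as before; only box-like operations report false, which is how MappingFrontier::advance_frontier_boundary halts ahead of a box so that a method such as BoxDecompositionRoutingMethod can decompose it.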
diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index 30f3b33135..c49a5a02ad 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -23,6 +23,9 @@ add_library(tket-${COMP} ArchitectureGraphClasses.cpp) list(APPEND DEPS_${COMP} + Circuit + OpType + Ops Graphs Utils) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index b7c3d975d0..4d76614133 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -21,8 +21,10 @@ #include #include +#include "Circuit/Conditional.hpp" #include "Graphs/CompleteGraph.hpp" #include "Graphs/DirectedGraph.hpp" +#include "Ops/OpPtr.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/Json.hpp" @@ -102,8 +104,7 @@ class Architecture : public ArchitectureBase> { */ node_set_t get_articulation_points(const Architecture &subarc) const; - bool valid_operation( - /*const OpType& optype, */ const std::vector &uids) const; + bool valid_operation(const Op_ptr &op, const std::vector &uids) const; /** * Sub-architecture generated by a subset of nodes. diff --git a/tket/src/Circuit/include/Circuit/Circuit.hpp b/tket/src/Circuit/include/Circuit/Circuit.hpp index 3d24de524a..dbd41d81c8 100644 --- a/tket/src/Circuit/include/Circuit/Circuit.hpp +++ b/tket/src/Circuit/include/Circuit/Circuit.hpp @@ -1059,6 +1059,11 @@ class Circuit { std::shared_ptr b_frontier, const std::function &skip_func) const; + // given current slice of quantum frontier, returns the next slice. + // ignore classical and boolean edges + CutFrontier next_q_cut( + std::shared_ptr u_frontier) const; + /** * Depth of circuit. 
* @@ -1375,6 +1380,13 @@ class Circuit { */ Circuit conditional_circuit(const bit_vector_t &bits, unsigned value) const; + /** + * Replaces one \ref vertex by applying \ref Box::to_circuit + * + * @return whether the vertex holds a box or a conditional box + */ + bool substitute_box_vertex(Vertex &vert, VertexDeletion vertex_deletion); + /** * Replaces each \ref Box operation by applying \ref Box::to_circuit * diff --git a/tket/src/Circuit/macro_circ_info.cpp b/tket/src/Circuit/macro_circ_info.cpp index e453e79cca..76b35732ec 100644 --- a/tket/src/Circuit/macro_circ_info.cpp +++ b/tket/src/Circuit/macro_circ_info.cpp @@ -517,6 +517,44 @@ CutFrontier Circuit::next_cut( get_next_b_frontier(*this, b_frontier, u_frontier, next_slice_lookup)}; } +CutFrontier Circuit::next_q_cut( + std::shared_ptr u_frontier) const { + auto next_slice = std::make_shared(); + VertexSet next_slice_lookup; + VertexSet bad_vertices; + EdgeSet edge_lookup; + for (const std::pair& pair : u_frontier->get()) { + edge_lookup.insert(pair.second); + } + + // find the next slice first + for (const std::pair& pair : u_frontier->get()) { + Vertex try_v = target(pair.second); + if (detect_final_Op(try_v)) continue; + if (next_slice_lookup.find(try_v) != next_slice_lookup.end()) + continue; // already going to be in next slice + bool good_vertex = bad_vertices.find(try_v) == bad_vertices.end(); + if (!good_vertex) continue; + EdgeVec ins = get_in_edges(try_v); + for (const Edge& in : ins) { + if (edge_lookup.find(in) == edge_lookup.end() && + get_edgetype(in) == EdgeType::Quantum) { + good_vertex = false; + bad_vertices.insert(try_v); + break; + } + } + if (good_vertex) { + next_slice_lookup.insert(try_v); + next_slice->push_back(try_v); + } + } + + return { + next_slice, get_next_u_frontier(*this, u_frontier, next_slice_lookup), + std::make_shared()}; +} + SliceVec Circuit::get_reverse_slices() const { vertex_map_t mapping; vertex_map_t rev_mapping; diff --git a/tket/src/Circuit/macro_manipulation.cpp b/tket/src/Circuit/macro_manipulation.cpp index 3f6c587cbd..c587b38fa2 100644 --- a/tket/src/Circuit/macro_manipulation.cpp +++ b/tket/src/Circuit/macro_manipulation.cpp @@ -629,27 +629,34 @@ Circuit Circuit::conditional_circuit( return cond_circ; } +bool Circuit::substitute_box_vertex( + Vertex& vert, VertexDeletion vertex_deletion) { + Op_ptr op = get_Op_ptr_from_Vertex(vert); + bool conditional = op->get_type() == OpType::Conditional; + if (conditional) { + const Conditional& cond = static_cast(*op); + op = cond.get_op(); + } + if (!op->get_desc().is_box()) return false; + const Box& b = static_cast(*op); + Circuit replacement = *b.to_circuit(); + if (conditional) { + substitute_conditional( + replacement, vert, vertex_deletion, OpGroupTransfer::Merge); + } else { + substitute(replacement, vert, vertex_deletion, OpGroupTransfer::Merge); + } + return true; +} + bool Circuit::decompose_boxes() { bool success = false; VertexList bin; BGL_FORALL_VERTICES(v, dag, DAG) { - Op_ptr op = get_Op_ptr_from_Vertex(v); - bool conditional = op->get_type() == OpType::Conditional; - if (conditional) { - const Conditional& cond = static_cast(*op); - op = cond.get_op(); - } - if (!op->get_desc().is_box()) continue; - const Box& b = static_cast(*op); - Circuit replacement = *b.to_circuit(); - if (conditional) { - substitute_conditional( - replacement, v, VertexDeletion::No, OpGroupTransfer::Merge); - } else { - substitute(replacement, v, VertexDeletion::No, OpGroupTransfer::Merge); + if (substitute_box_vertex(v, VertexDeletion::No)) { + 
bin.push_back(v); + success = true; } - bin.push_back(v); - success = true; } remove_vertices(bin, GraphRewiring::No, VertexDeletion::Yes); return success; diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp new file mode 100644 index 0000000000..cd52143ece --- /dev/null +++ b/tket/src/Mapping/BoxDecomposition.cpp @@ -0,0 +1,71 @@ +#include "Mapping/BoxDecomposition.hpp" + +#include "Mapping/MappingFrontier.hpp" + +namespace tket { + +BoxDecomposition::BoxDecomposition( + const ArchitecturePtr &_architecture, + std::shared_ptr &_mapping_frontier) + : architecture_(_architecture), mapping_frontier_(_mapping_frontier) {} + +void BoxDecomposition::solve() { + // Box type vertices are later removed from DAG + VertexList bin; + + std::shared_ptr frontier_edges = + frontier_convert_vertport_to_edge( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->quantum_boundary); + CutFrontier next_cut = + this->mapping_frontier_->circuit_.next_q_cut(frontier_edges); + for (Vertex &vert : *next_cut.slice) { + if (this->mapping_frontier_->circuit_.substitute_box_vertex( + vert, Circuit::VertexDeletion::No)) + bin.push_back(vert); + } + + // Delete vertices + this->mapping_frontier_->circuit_.remove_vertices( + bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); +} + +BoxDecompositionRoutingMethod::BoxDecompositionRoutingMethod(){}; + +bool BoxDecompositionRoutingMethod::check_method( + const std::shared_ptr &mapping_frontier, + const ArchitecturePtr & /*architecture*/) const { + std::shared_ptr frontier_edges = + frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->quantum_boundary); + CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); + for (const Vertex &vert : *next_cut.slice) { + Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); + if (op->get_desc().is_box() || + (op->get_type() == OpType::Conditional && + static_cast(*op).get_op()->get_desc().is_box())) + return true; + } + return false; +} + +unit_map_t BoxDecompositionRoutingMethod::routing_method( + std::shared_ptr &mapping_frontier, + const ArchitecturePtr &architecture) const { + BoxDecomposition bd(architecture, mapping_frontier); + bd.solve(); + return {}; +} + +nlohmann::json BoxDecompositionRoutingMethod::serialize() const { + nlohmann::json j; + j["name"] = "BoxDecompositionRoutingMethod"; + return j; +} + +BoxDecompositionRoutingMethod BoxDecompositionRoutingMethod::deserialize( + const nlohmann::json & /*j*/) { + return BoxDecompositionRoutingMethod(); +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index 534adc2ea8..17c2b4306d 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -24,6 +24,7 @@ add_library(tket-${COMP} MappingFrontier.cpp MappingManager.cpp MultiGateReorder.cpp + BoxDecomposition.cpp RoutingMethodCircuit.cpp RoutingMethodJson.cpp Verification.cpp) diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index ed3b8480cd..c1e3095263 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -510,8 +510,19 @@ LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) : max_depth_(_max_depth){}; bool LexiRouteRoutingMethod::check_method( - const std::shared_ptr& /*mapping_frontier*/, + const std::shared_ptr& mapping_frontier, const ArchitecturePtr& /*architecture*/) const { + std::shared_ptr frontier_edges = + 
frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->quantum_boundary); + CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); + for (const Vertex& vert : *next_cut.slice) { + Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); + if (op->get_desc().is_box() || + (op->get_type() == OpType::Conditional && + static_cast(*op).get_op()->get_desc().is_box())) + return false; + } return true; } diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 86fd62bc8c..e435dc4261 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -218,19 +218,8 @@ void MappingFrontier::advance_frontier_boundary( std::shared_ptr frontier_edges = frontier_convert_vertport_to_edge( this->circuit_, this->quantum_boundary); - // Add all classical edges that share the same target - unsigned dummy_bit_index = 0; - for (const std::pair& pair : frontier_edges->get()) { - Vertex vert = this->circuit_.target(pair.second); - for (const Edge& e : - this->circuit_.get_in_edges_of_type(vert, EdgeType::Classical)) { - frontier_edges->insert({Bit(dummy_bit_index), e}); - dummy_bit_index++; - } - } - CutFrontier next_cut = this->circuit_.next_cut( - frontier_edges, std::make_shared()); + CutFrontier next_cut = this->circuit_.next_q_cut(frontier_edges); // For each vertex in a slice, if its physically permitted, update // quantum_boundary with quantum out edges from vertex (i.e. @@ -252,8 +241,7 @@ void MappingFrontier::advance_frontier_boundary( nodes.push_back(Node(uid)); } if (architecture->valid_operation( - /* this->circuit_.get_OpType_from_Vertex(vert), */ - nodes) || + this->circuit_.get_Op_ptr_from_Vertex(vert), nodes) || this->circuit_.get_OpType_from_Vertex(vert) == OpType::Barrier) { // if no valid operation, boundary not updated and while loop terminates boundary_updated = true; diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index c05ea06122..59f052c2bc 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -67,8 +67,9 @@ bool is_physically_permitted( for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); } + Op_ptr op = frontier->circuit_.get_Op_ptr_from_Vertex(vert); - return arc_ptr->valid_operation(nodes); + return arc_ptr->valid_operation(op, nodes); } // This method will try to commute a vertex to the quantum frontier diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 1f9479c89f..ba17a22c4c 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -39,6 +39,9 @@ void from_json(const nlohmann::json& j, std::vector& rmp_v) { } else if (name == "MultiGateReorderRoutingMethod") { rmp_v.push_back(std::make_shared( MultiGateReorderRoutingMethod::deserialize(c))); + } else if (name == "BoxDecompositionRoutingMethod") { + rmp_v.push_back(std::make_shared( + BoxDecompositionRoutingMethod::deserialize(c))); } else { std::logic_error( "Deserialization for given RoutingMethod not supported."); diff --git a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp new file mode 100644 index 0000000000..8b1cd45fa4 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp @@ -0,0 +1,63 @@ +#ifndef _TKET_BoxDecomposition_H_ +#define 
_TKET_BoxDecomposition_H_ + +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class BoxDecomposition { + public: + /** + * Class Constructor + * @param _architecture Architecture object added operations must respect + * @param _mapping_frontier Contains Circuit object to be modified + */ + BoxDecomposition( + const ArchitecturePtr& _architecture, + std::shared_ptr& _mapping_frontier); + + /** + * Decompose any boxes in the next slice after the frontier + */ + void solve(); + + private: + // Architecture all new physical operations must respect + ArchitecturePtr architecture_; + std::shared_ptr mapping_frontier_; +}; + +class BoxDecompositionRoutingMethod : public RoutingMethod { + public: + /** + * Decompose any boxes on the frontier + */ + BoxDecompositionRoutingMethod(); + + /** + * @return true if method can route subcircuit, false if not + */ + bool check_method( + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& /*architecture*/) const override; + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. + * + */ + unit_map_t routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static BoxDecompositionRoutingMethod deserialize(const nlohmann::json& /*j*/); +}; + +} // namespace tket + +#endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp index cb7a51c300..317cf9b6d7 100644 --- a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -59,8 +59,8 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { * @return true if method can route subcircuit, false if not */ bool check_method( - const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const override; + const std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp index 9cbdb22e90..0a12ed92ff 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -14,6 +14,7 @@ #pragma once +#include "Mapping/BoxDecomposition.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/MultiGateReorder.hpp" #include "Mapping/RoutingMethod.hpp" diff --git a/tket/tests/Circuit/test_Circ.cpp b/tket/tests/Circuit/test_Circ.cpp index 7d0f43f48b..d60b463a98 100644 --- a/tket/tests/Circuit/test_Circ.cpp +++ b/tket/tests/Circuit/test_Circ.cpp @@ -1288,6 +1288,35 @@ SCENARIO("Test next slice") { } } +SCENARIO("Test next quantum slice") { + GIVEN("A simple circuit") { + Circuit circ(3, 1); + Vertex v1 = circ.add_op(OpType::X, {0}); + Vertex v2 = + circ.add_conditional_gate(OpType::Rx, {0.6}, {1}, {0}, 1); + Vertex v3 = + circ.add_conditional_gate(OpType::Ry, {0.6}, {2}, {0}, 1); + Vertex v4 = circ.add_op(OpType::S, {2}); + Vertex v5 = circ.add_op(OpType::T, {1}); + + auto frontier = std::make_shared(); + for (const Qubit& q : circ.all_qubits()) { + Vertex in = circ.get_in(q); + frontier->insert({q, 
circ.get_nth_out_edge(in, 0)}); + } + CutFrontier slice_front = circ.next_q_cut(frontier); + Slice sl = *slice_front.slice; + WHEN("The frontier is calculated from inputs") { + THEN("The first slice is recovered accurately.") { + REQUIRE(sl.size() == 3); + REQUIRE(sl[0] == v1); + REQUIRE(sl[1] == v2); + REQUIRE(sl[2] == v3); + } + } + } +} + SCENARIO("Test circuit.transpose() method") { GIVEN("Simple circuit") { Circuit circ(2); diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp new file mode 100644 index 0000000000..d2ec20da5d --- /dev/null +++ b/tket/tests/test_BoxDecompRoutingMethod.cpp @@ -0,0 +1,137 @@ +#include + +#include "Mapping/BoxDecomposition.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Predicates/Predicates.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" + +namespace tket { +SCENARIO("Decompose boxes") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + Eigen::Matrix4cd m; + m << 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0; + Unitary2qBox ubox(m); + + GIVEN("A box") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + + circ.add_box(ubox, {0, 2}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + std::shared_ptr mf = + std::make_shared(circ); + BoxDecomposition bd(shared_arc, mf); + bd.solve(); + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + REQUIRE(!c.get_op_ptr()->get_desc().is_box()); + } + } + + GIVEN("A conditional box") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + Conditional cond(std::make_shared(ubox), 1, 1); + circ.add_op( + std::make_shared(cond), {Bit(0), Qubit(0), Qubit(1)}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + BoxDecomposition bd(shared_arc, mf); + bd.solve(); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + Op_ptr op = c.get_op_ptr(); + REQUIRE( + !(op->get_desc().is_box() || (op->get_type() == OpType::Conditional && + static_cast(*op) + .get_op() + ->get_desc() + .is_box()))); + } + } + + GIVEN("Test BoxDecompositionRoutingMethod") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + circ.add_box(ubox, {0, 3}); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_box(ubox, {1, 3}); + circ.add_box(ubox, {0, 1}); + circ.add_op(OpType::X, {qubits[1]}); + circ.add_op(OpType::Measure, {0, 0}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + MappingManager mm(shared_arc); + std::vector vrm = { + std::make_shared(10), + 
std::make_shared()}; + bool res = mm.route_circuit(circ, vrm); + REQUIRE(res); + PredicatePtr routed_correctly = + std::make_shared(architecture); + REQUIRE(routed_correctly->verify(circ)); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + REQUIRE(!c.get_op_ptr()->get_desc().is_box()); + } + } +} + +SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { + GIVEN("BoxDecompositionRoutingMethod") { + nlohmann::json j_rm; + j_rm["name"] = "BoxDecompositionRoutingMethod"; + BoxDecompositionRoutingMethod rm_loaded = + BoxDecompositionRoutingMethod::deserialize(j_rm); + nlohmann::json j_rm_serialised = rm_loaded.serialize(); + REQUIRE(j_rm == j_rm_serialised); + } + + GIVEN("BoxDecompositionRoutingMethod vector") { + nlohmann::json j_rms = { + {{"name", "BoxDecompositionRoutingMethod"}}, + { + {"name", "LexiRouteRoutingMethod"}, + {"depth", 3}, + }}; + std::vector rms = + j_rms.get>(); + nlohmann::json j_rms_serialised = rms; + REQUIRE(j_rms == j_rms_serialised); + } +} + +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp index bb33f0f095..9a82006a07 100644 --- a/tket/tests/test_MappingFrontier.cpp +++ b/tket/tests/test_MappingFrontier.cpp @@ -86,6 +86,36 @@ SCENARIO("Test MappingFrontier initialisation, advance_frontier_boundary.") { REQUIRE(mf.circuit_.source(e3) == v9); REQUIRE(mf.circuit_.target(e3) == v3); } + + GIVEN("A circuit with measurements and classically controlled operations") { + Circuit circ(3, 1); + std::vector qubits = circ.all_qubits(); + // All gates are physically permitted + Vertex v0 = circ.add_op(OpType::Measure, {0, 0}); + Vertex v1 = + circ.add_conditional_gate(OpType::Rx, {0.6}, {0}, {0}, 1); + Vertex v2 = + circ.add_conditional_gate(OpType::Rz, {0.6}, {1}, {0}, 1); + Vertex v3 = circ.add_op(OpType::X, {2}); + std::vector nodes = {Node(0), Node(1), Node(2)}; + + Architecture arc({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + mf.advance_frontier_boundary(shared_arc); + VertPort vp0 = mf.quantum_boundary->get().find(nodes[0])->second; + VertPort vp1 = mf.quantum_boundary->get().find(nodes[1])->second; + VertPort vp2 = mf.quantum_boundary->get().find(nodes[2])->second; + Op_ptr op = circ.get_Op_ptr_from_Vertex(vp0.first); + Op_ptr op2 = circ.get_Op_ptr_from_Vertex(vp1.first); + Op_ptr op3 = circ.get_Op_ptr_from_Vertex(vp2.first); + REQUIRE(vp0.first == v1); + REQUIRE(vp1.first == v2); + REQUIRE(vp2.first == v3); + } } SCENARIO("Test MappingFrontier get_default_to_quantum_boundary_unit_map") { diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 12117c2955..2e8e4c099a 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -44,7 +44,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -85,7 +85,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + 
REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -131,7 +131,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -178,7 +178,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -220,8 +220,10 @@ SCENARIO("Reorder circuits with limited search space") { // Check only the first valid CZ get commuted to the front std::vector commands = circ.get_commands(); REQUIRE(shared_arc->valid_operation( + commands[0].get_op_ptr(), {Node(commands[0].get_args()[0]), Node(commands[0].get_args()[1])})); REQUIRE(!shared_arc->valid_operation( + commands[0].get_op_ptr(), {Node(commands[1].get_args()[0]), Node(commands[1].get_args()[1])})); const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -273,7 +275,7 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -297,13 +299,13 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands2[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation(commands2[i].get_op_ptr(), nodes)); } std::vector nodes; for (auto arg : commands2[4].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(!shared_arc->valid_operation(nodes)); + REQUIRE(!shared_arc->valid_operation(commands2[4].get_op_ptr(), nodes)); const auto u2 = tket_sim::get_unitary(circ2); REQUIRE(tket_sim::compare_statevectors_or_unitaries( u2, u1, tket_sim::MatrixEquivalence::EQUAL)); @@ -358,7 +360,7 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { } } -SCENARIO("Test JSON serialisation") { +SCENARIO("Test JSON serialisation for MultiGateReorderRoutingMethod") { GIVEN("MultiGateReorderRoutingMethod") { nlohmann::json j_rm; j_rm["name"] = "MultiGateReorderRoutingMethod"; diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index acc586497d..61766d8162 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -632,10 +632,11 @@ SCENARIO("Test compiler pass serializations") { nlohmann::json j_loaded = loaded; REQUIRE(j_pp == j_loaded); } - GIVEN("Routing with MultiGateReorderRoutingMethod") { + GIVEN("Routing with multiple routing methods") { RoutingMethodPtr mrmp = std::make_shared(60, 80); - std::vector mrcon = {mrmp, rmp}; + RoutingMethodPtr brmp = std::make_shared(); + std::vector mrcon = {mrmp, rmp, brmp}; Circuit circ = CircuitsForTesting::get().uccsd; CompilationUnit cu{circ}; PassPtr placement = gen_placement_pass(place); diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 4fcdd0481e..1e40106386 100644 --- a/tket/tests/tkettestsfiles.cmake +++ 
b/tket/tests/tkettestsfiles.cmake @@ -95,6 +95,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_LexicographicalComparison.cpp ${TKET_TESTS_DIR}/test_LexiRoute.cpp ${TKET_TESTS_DIR}/test_MultiGateReorder.cpp + ${TKET_TESTS_DIR}/test_BoxDecompRoutingMethod.cpp ${TKET_TESTS_DIR}/test_DeviceCharacterisation.cpp ${TKET_TESTS_DIR}/test_Clifford.cpp ${TKET_TESTS_DIR}/test_MeasurementSetup.cpp From 4d2dbce6cddf45cbfcdde387bf644e59f19c9313 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 14 Feb 2022 10:21:33 +0000 Subject: [PATCH 065/146] update compilation for tokenswapping --- tket/src/CMakeLists.txt | 1 + tket/src/Mapping/MappingManager.cpp | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index e101b6024f..f6924b79a1 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -77,6 +77,7 @@ list(APPEND TKET_COMPS Converters Placement ArchAwareSynth + TokenSwapping Mapping MeasurementSetup Transformations diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 3df1a52f17..6896c57f9e 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -14,7 +14,6 @@ #include "Mapping/MappingManager.hpp" -#include "OpType/OpTypeFunctions.hpp" #include "TokenSwapping/main_entry_functions.hpp" namespace tket { From a11223661cd999cbb1ec084dc4393b1d13ae2518 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 14 Feb 2022 10:22:05 +0000 Subject: [PATCH 066/146] Revert "Feature/decompose boxes in routing (#197)" This reverts commit 86fb61e6f2922050a1ac438d1a8103ea6942a239. --- tket/src/Architecture/Architecture.cpp | 7 +- tket/src/Architecture/CMakeLists.txt | 3 - .../include/Architecture/Architecture.hpp | 5 +- tket/src/Circuit/include/Circuit/Circuit.hpp | 12 -- tket/src/Circuit/macro_circ_info.cpp | 38 ----- tket/src/Circuit/macro_manipulation.cpp | 39 ++--- tket/src/Mapping/BoxDecomposition.cpp | 71 --------- tket/src/Mapping/CMakeLists.txt | 1 - tket/src/Mapping/LexiRoute.cpp | 13 +- tket/src/Mapping/MappingFrontier.cpp | 16 +- tket/src/Mapping/MultiGateReorder.cpp | 3 +- tket/src/Mapping/RoutingMethodJson.cpp | 3 - .../include/Mapping/BoxDecomposition.hpp | 63 -------- .../include/Mapping/MultiGateReorder.hpp | 4 +- .../include/Mapping/RoutingMethodJson.hpp | 1 - tket/tests/Circuit/test_Circ.cpp | 29 ---- tket/tests/test_BoxDecompRoutingMethod.cpp | 137 ------------------ tket/tests/test_MappingFrontier.cpp | 30 ---- tket/tests/test_MultiGateReorder.cpp | 18 +-- tket/tests/test_json.cpp | 5 +- tket/tests/tkettestsfiles.cmake | 1 - 21 files changed, 47 insertions(+), 452 deletions(-) delete mode 100644 tket/src/Mapping/BoxDecomposition.cpp delete mode 100644 tket/src/Mapping/include/Mapping/BoxDecomposition.hpp delete mode 100644 tket/tests/test_BoxDecompRoutingMethod.cpp diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index 3c88bb5081..479f734f08 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -27,12 +27,7 @@ namespace tket { // basic implementation that works off same prior assumptions // TODO: Update this for more mature systems of multi-qubit gates bool Architecture::valid_operation( - const Op_ptr& op, const std::vector& uids) const { - if (op->get_desc().is_box() || - (op->get_type() == OpType::Conditional && - static_cast(*op).get_op()->get_desc().is_box())) - return false; - + /*const OpType& optype, */ const std::vector& uids) const { if (uids.size() == 1) { // 
TODO: for simple case here this should probably not pass if // node_exists[uids[0]] == FALSE, but should be fine for now? diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index c49a5a02ad..30f3b33135 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -23,9 +23,6 @@ add_library(tket-${COMP} ArchitectureGraphClasses.cpp) list(APPEND DEPS_${COMP} - Circuit - OpType - Ops Graphs Utils) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index 4d76614133..b7c3d975d0 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -21,10 +21,8 @@ #include #include -#include "Circuit/Conditional.hpp" #include "Graphs/CompleteGraph.hpp" #include "Graphs/DirectedGraph.hpp" -#include "Ops/OpPtr.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/Json.hpp" @@ -104,7 +102,8 @@ class Architecture : public ArchitectureBase> { */ node_set_t get_articulation_points(const Architecture &subarc) const; - bool valid_operation(const Op_ptr &op, const std::vector &uids) const; + bool valid_operation( + /*const OpType& optype, */ const std::vector &uids) const; /** * Sub-architecture generated by a subset of nodes. diff --git a/tket/src/Circuit/include/Circuit/Circuit.hpp b/tket/src/Circuit/include/Circuit/Circuit.hpp index dbd41d81c8..3d24de524a 100644 --- a/tket/src/Circuit/include/Circuit/Circuit.hpp +++ b/tket/src/Circuit/include/Circuit/Circuit.hpp @@ -1059,11 +1059,6 @@ class Circuit { std::shared_ptr b_frontier, const std::function &skip_func) const; - // given current slice of quantum frontier, returns the next slice. - // ignore classical and boolean edges - CutFrontier next_q_cut( - std::shared_ptr u_frontier) const; - /** * Depth of circuit. 
* @@ -1380,13 +1375,6 @@ class Circuit { */ Circuit conditional_circuit(const bit_vector_t &bits, unsigned value) const; - /** - * Replaces one \ref vertex by applying \ref Box::to_circuit - * - * @return whether the vertex holds a box or a conditional box - */ - bool substitute_box_vertex(Vertex &vert, VertexDeletion vertex_deletion); - /** * Replaces each \ref Box operation by applying \ref Box::to_circuit * diff --git a/tket/src/Circuit/macro_circ_info.cpp b/tket/src/Circuit/macro_circ_info.cpp index 76b35732ec..e453e79cca 100644 --- a/tket/src/Circuit/macro_circ_info.cpp +++ b/tket/src/Circuit/macro_circ_info.cpp @@ -517,44 +517,6 @@ CutFrontier Circuit::next_cut( get_next_b_frontier(*this, b_frontier, u_frontier, next_slice_lookup)}; } -CutFrontier Circuit::next_q_cut( - std::shared_ptr u_frontier) const { - auto next_slice = std::make_shared(); - VertexSet next_slice_lookup; - VertexSet bad_vertices; - EdgeSet edge_lookup; - for (const std::pair& pair : u_frontier->get()) { - edge_lookup.insert(pair.second); - } - - // find the next slice first - for (const std::pair& pair : u_frontier->get()) { - Vertex try_v = target(pair.second); - if (detect_final_Op(try_v)) continue; - if (next_slice_lookup.find(try_v) != next_slice_lookup.end()) - continue; // already going to be in next slice - bool good_vertex = bad_vertices.find(try_v) == bad_vertices.end(); - if (!good_vertex) continue; - EdgeVec ins = get_in_edges(try_v); - for (const Edge& in : ins) { - if (edge_lookup.find(in) == edge_lookup.end() && - get_edgetype(in) == EdgeType::Quantum) { - good_vertex = false; - bad_vertices.insert(try_v); - break; - } - } - if (good_vertex) { - next_slice_lookup.insert(try_v); - next_slice->push_back(try_v); - } - } - - return { - next_slice, get_next_u_frontier(*this, u_frontier, next_slice_lookup), - std::make_shared()}; -} - SliceVec Circuit::get_reverse_slices() const { vertex_map_t mapping; vertex_map_t rev_mapping; diff --git a/tket/src/Circuit/macro_manipulation.cpp b/tket/src/Circuit/macro_manipulation.cpp index c587b38fa2..3f6c587cbd 100644 --- a/tket/src/Circuit/macro_manipulation.cpp +++ b/tket/src/Circuit/macro_manipulation.cpp @@ -629,34 +629,27 @@ Circuit Circuit::conditional_circuit( return cond_circ; } -bool Circuit::substitute_box_vertex( - Vertex& vert, VertexDeletion vertex_deletion) { - Op_ptr op = get_Op_ptr_from_Vertex(vert); - bool conditional = op->get_type() == OpType::Conditional; - if (conditional) { - const Conditional& cond = static_cast(*op); - op = cond.get_op(); - } - if (!op->get_desc().is_box()) return false; - const Box& b = static_cast(*op); - Circuit replacement = *b.to_circuit(); - if (conditional) { - substitute_conditional( - replacement, vert, vertex_deletion, OpGroupTransfer::Merge); - } else { - substitute(replacement, vert, vertex_deletion, OpGroupTransfer::Merge); - } - return true; -} - bool Circuit::decompose_boxes() { bool success = false; VertexList bin; BGL_FORALL_VERTICES(v, dag, DAG) { - if (substitute_box_vertex(v, VertexDeletion::No)) { - bin.push_back(v); - success = true; + Op_ptr op = get_Op_ptr_from_Vertex(v); + bool conditional = op->get_type() == OpType::Conditional; + if (conditional) { + const Conditional& cond = static_cast(*op); + op = cond.get_op(); + } + if (!op->get_desc().is_box()) continue; + const Box& b = static_cast(*op); + Circuit replacement = *b.to_circuit(); + if (conditional) { + substitute_conditional( + replacement, v, VertexDeletion::No, OpGroupTransfer::Merge); + } else { + substitute(replacement, v, 
VertexDeletion::No, OpGroupTransfer::Merge); } + bin.push_back(v); + success = true; } remove_vertices(bin, GraphRewiring::No, VertexDeletion::Yes); return success; diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp deleted file mode 100644 index cd52143ece..0000000000 --- a/tket/src/Mapping/BoxDecomposition.cpp +++ /dev/null @@ -1,71 +0,0 @@ -#include "Mapping/BoxDecomposition.hpp" - -#include "Mapping/MappingFrontier.hpp" - -namespace tket { - -BoxDecomposition::BoxDecomposition( - const ArchitecturePtr &_architecture, - std::shared_ptr &_mapping_frontier) - : architecture_(_architecture), mapping_frontier_(_mapping_frontier) {} - -void BoxDecomposition::solve() { - // Box type vertices are later removed from DAG - VertexList bin; - - std::shared_ptr frontier_edges = - frontier_convert_vertport_to_edge( - this->mapping_frontier_->circuit_, - this->mapping_frontier_->quantum_boundary); - CutFrontier next_cut = - this->mapping_frontier_->circuit_.next_q_cut(frontier_edges); - for (Vertex &vert : *next_cut.slice) { - if (this->mapping_frontier_->circuit_.substitute_box_vertex( - vert, Circuit::VertexDeletion::No)) - bin.push_back(vert); - } - - // Delete vertices - this->mapping_frontier_->circuit_.remove_vertices( - bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); -} - -BoxDecompositionRoutingMethod::BoxDecompositionRoutingMethod(){}; - -bool BoxDecompositionRoutingMethod::check_method( - const std::shared_ptr &mapping_frontier, - const ArchitecturePtr & /*architecture*/) const { - std::shared_ptr frontier_edges = - frontier_convert_vertport_to_edge( - mapping_frontier->circuit_, mapping_frontier->quantum_boundary); - CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); - for (const Vertex &vert : *next_cut.slice) { - Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); - if (op->get_desc().is_box() || - (op->get_type() == OpType::Conditional && - static_cast(*op).get_op()->get_desc().is_box())) - return true; - } - return false; -} - -unit_map_t BoxDecompositionRoutingMethod::routing_method( - std::shared_ptr &mapping_frontier, - const ArchitecturePtr &architecture) const { - BoxDecomposition bd(architecture, mapping_frontier); - bd.solve(); - return {}; -} - -nlohmann::json BoxDecompositionRoutingMethod::serialize() const { - nlohmann::json j; - j["name"] = "BoxDecompositionRoutingMethod"; - return j; -} - -BoxDecompositionRoutingMethod BoxDecompositionRoutingMethod::deserialize( - const nlohmann::json & /*j*/) { - return BoxDecompositionRoutingMethod(); -} - -} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index 84518bb144..b7c6c74769 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -24,7 +24,6 @@ add_library(tket-${COMP} MappingFrontier.cpp MappingManager.cpp MultiGateReorder.cpp - BoxDecomposition.cpp RoutingMethodCircuit.cpp RoutingMethodJson.cpp Verification.cpp) diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index c1e3095263..ed3b8480cd 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -510,19 +510,8 @@ LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) : max_depth_(_max_depth){}; bool LexiRouteRoutingMethod::check_method( - const std::shared_ptr& mapping_frontier, + const std::shared_ptr& /*mapping_frontier*/, const ArchitecturePtr& /*architecture*/) const { - std::shared_ptr frontier_edges = 
- frontier_convert_vertport_to_edge( - mapping_frontier->circuit_, mapping_frontier->quantum_boundary); - CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); - for (const Vertex& vert : *next_cut.slice) { - Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); - if (op->get_desc().is_box() || - (op->get_type() == OpType::Conditional && - static_cast(*op).get_op()->get_desc().is_box())) - return false; - } return true; } diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index e435dc4261..86fd62bc8c 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -218,8 +218,19 @@ void MappingFrontier::advance_frontier_boundary( std::shared_ptr frontier_edges = frontier_convert_vertport_to_edge( this->circuit_, this->quantum_boundary); + // Add all classical edges that share the same target + unsigned dummy_bit_index = 0; + for (const std::pair& pair : frontier_edges->get()) { + Vertex vert = this->circuit_.target(pair.second); + for (const Edge& e : + this->circuit_.get_in_edges_of_type(vert, EdgeType::Classical)) { + frontier_edges->insert({Bit(dummy_bit_index), e}); + dummy_bit_index++; + } + } - CutFrontier next_cut = this->circuit_.next_q_cut(frontier_edges); + CutFrontier next_cut = this->circuit_.next_cut( + frontier_edges, std::make_shared()); // For each vertex in a slice, if its physically permitted, update // quantum_boundary with quantum out edges from vertex (i.e. @@ -241,7 +252,8 @@ void MappingFrontier::advance_frontier_boundary( nodes.push_back(Node(uid)); } if (architecture->valid_operation( - this->circuit_.get_Op_ptr_from_Vertex(vert), nodes) || + /* this->circuit_.get_OpType_from_Vertex(vert), */ + nodes) || this->circuit_.get_OpType_from_Vertex(vert) == OpType::Barrier) { // if no valid operation, boundary not updated and while loop terminates boundary_updated = true; diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 59f052c2bc..c05ea06122 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -67,9 +67,8 @@ bool is_physically_permitted( for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); } - Op_ptr op = frontier->circuit_.get_Op_ptr_from_Vertex(vert); - return arc_ptr->valid_operation(op, nodes); + return arc_ptr->valid_operation(nodes); } // This method will try to commute a vertex to the quantum frontier diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index ba17a22c4c..1f9479c89f 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -39,9 +39,6 @@ void from_json(const nlohmann::json& j, std::vector& rmp_v) { } else if (name == "MultiGateReorderRoutingMethod") { rmp_v.push_back(std::make_shared( MultiGateReorderRoutingMethod::deserialize(c))); - } else if (name == "BoxDecompositionRoutingMethod") { - rmp_v.push_back(std::make_shared( - BoxDecompositionRoutingMethod::deserialize(c))); } else { std::logic_error( "Deserialization for given RoutingMethod not supported."); diff --git a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp deleted file mode 100644 index 8b1cd45fa4..0000000000 --- a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef _TKET_BoxDecomposition_H_ -#define 
_TKET_BoxDecomposition_H_ - -#include "Mapping/MappingFrontier.hpp" -#include "Mapping/RoutingMethod.hpp" - -namespace tket { - -class BoxDecomposition { - public: - /** - * Class Constructor - * @param _architecture Architecture object added operations must respect - * @param _mapping_frontier Contains Circuit object to be modified - */ - BoxDecomposition( - const ArchitecturePtr& _architecture, - std::shared_ptr& _mapping_frontier); - - /** - * Decompose any boxes in the next slice after the frontier - */ - void solve(); - - private: - // Architecture all new physical operations must respect - ArchitecturePtr architecture_; - std::shared_ptr mapping_frontier_; -}; - -class BoxDecompositionRoutingMethod : public RoutingMethod { - public: - /** - * Decompose any boxes on the frontier - */ - BoxDecompositionRoutingMethod(); - - /** - * @return true if method can route subcircuit, false if not - */ - bool check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& /*architecture*/) const override; - - /** - * @param mapping_frontier Contains boundary of routed/unrouted circuit for - * modifying - * @param architecture Architecture providing physical constraints - * @return Logical to Physical mapping at boundary due to modification. - * - */ - unit_map_t routing_method( - std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const override; - - nlohmann::json serialize() const override; - - static BoxDecompositionRoutingMethod deserialize(const nlohmann::json& /*j*/); -}; - -} // namespace tket - -#endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp index 317cf9b6d7..cb7a51c300 100644 --- a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -59,8 +59,8 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { * @return true if method can route subcircuit, false if not */ bool check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const override; + const std::shared_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const override; /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp index 0a12ed92ff..9cbdb22e90 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -14,7 +14,6 @@ #pragma once -#include "Mapping/BoxDecomposition.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/MultiGateReorder.hpp" #include "Mapping/RoutingMethod.hpp" diff --git a/tket/tests/Circuit/test_Circ.cpp b/tket/tests/Circuit/test_Circ.cpp index d60b463a98..7d0f43f48b 100644 --- a/tket/tests/Circuit/test_Circ.cpp +++ b/tket/tests/Circuit/test_Circ.cpp @@ -1288,35 +1288,6 @@ SCENARIO("Test next slice") { } } -SCENARIO("Test next quantum slice") { - GIVEN("A simple circuit") { - Circuit circ(3, 1); - Vertex v1 = circ.add_op(OpType::X, {0}); - Vertex v2 = - circ.add_conditional_gate(OpType::Rx, {0.6}, {1}, {0}, 1); - Vertex v3 = - circ.add_conditional_gate(OpType::Ry, {0.6}, {2}, {0}, 1); - Vertex v4 = circ.add_op(OpType::S, {2}); - Vertex v5 = circ.add_op(OpType::T, {1}); - - auto frontier = std::make_shared(); - for (const Qubit& q : circ.all_qubits()) { - Vertex in = circ.get_in(q); - frontier->insert({q, 
circ.get_nth_out_edge(in, 0)}); - } - CutFrontier slice_front = circ.next_q_cut(frontier); - Slice sl = *slice_front.slice; - WHEN("The frontier is calculated from inputs") { - THEN("The first slice is recovered accurately.") { - REQUIRE(sl.size() == 3); - REQUIRE(sl[0] == v1); - REQUIRE(sl[1] == v2); - REQUIRE(sl[2] == v3); - } - } - } -} - SCENARIO("Test circuit.transpose() method") { GIVEN("Simple circuit") { Circuit circ(2); diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp deleted file mode 100644 index d2ec20da5d..0000000000 --- a/tket/tests/test_BoxDecompRoutingMethod.cpp +++ /dev/null @@ -1,137 +0,0 @@ -#include - -#include "Mapping/BoxDecomposition.hpp" -#include "Mapping/LexiRoute.hpp" -#include "Mapping/MappingManager.hpp" -#include "Predicates/Predicates.hpp" -#include "Simulation/CircuitSimulator.hpp" -#include "Simulation/ComparisonFunctions.hpp" - -namespace tket { -SCENARIO("Decompose boxes") { - std::vector nodes = { - Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), - Node("node_test", 3)}; - - // n0 -- n1 -- n2 -- n3 - Architecture architecture( - {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); - ArchitecturePtr shared_arc = std::make_shared(architecture); - - Eigen::Matrix4cd m; - m << 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0; - Unitary2qBox ubox(m); - - GIVEN("A box") { - Circuit circ(4); - std::vector qubits = circ.all_qubits(); - - circ.add_box(ubox, {0, 2}); - std::map rename_map = { - {qubits[0], nodes[0]}, - {qubits[1], nodes[1]}, - {qubits[2], nodes[2]}, - {qubits[3], nodes[3]}}; - circ.rename_units(rename_map); - Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); - BoxDecomposition bd(shared_arc, mf); - bd.solve(); - const auto u = tket_sim::get_unitary(circ); - const auto u1 = tket_sim::get_unitary(circ_copy); - REQUIRE(tket_sim::compare_statevectors_or_unitaries( - u, u1, tket_sim::MatrixEquivalence::EQUAL)); - std::vector commands = mf->circuit_.get_commands(); - for (Command c : commands) { - REQUIRE(!c.get_op_ptr()->get_desc().is_box()); - } - } - - GIVEN("A conditional box") { - Circuit circ(4, 1); - std::vector qubits = circ.all_qubits(); - Conditional cond(std::make_shared(ubox), 1, 1); - circ.add_op( - std::make_shared(cond), {Bit(0), Qubit(0), Qubit(1)}); - std::map rename_map = { - {qubits[0], nodes[0]}, - {qubits[1], nodes[1]}, - {qubits[2], nodes[2]}, - {qubits[3], nodes[3]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - BoxDecomposition bd(shared_arc, mf); - bd.solve(); - std::vector commands = mf->circuit_.get_commands(); - for (Command c : commands) { - Op_ptr op = c.get_op_ptr(); - REQUIRE( - !(op->get_desc().is_box() || (op->get_type() == OpType::Conditional && - static_cast(*op) - .get_op() - ->get_desc() - .is_box()))); - } - } - - GIVEN("Test BoxDecompositionRoutingMethod") { - Circuit circ(4, 1); - std::vector qubits = circ.all_qubits(); - circ.add_box(ubox, {0, 3}); - circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); - circ.add_op(OpType::CX, {qubits[1], qubits[3]}); - circ.add_box(ubox, {1, 3}); - circ.add_box(ubox, {0, 1}); - circ.add_op(OpType::X, {qubits[1]}); - circ.add_op(OpType::Measure, {0, 0}); - std::map rename_map = { - {qubits[0], nodes[0]}, - {qubits[1], nodes[1]}, - {qubits[2], nodes[2]}, - {qubits[3], nodes[3]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - MappingManager mm(shared_arc); - std::vector vrm = { - std::make_shared(10), 
- std::make_shared()}; - bool res = mm.route_circuit(circ, vrm); - REQUIRE(res); - PredicatePtr routed_correctly = - std::make_shared(architecture); - REQUIRE(routed_correctly->verify(circ)); - std::vector commands = mf->circuit_.get_commands(); - for (Command c : commands) { - REQUIRE(!c.get_op_ptr()->get_desc().is_box()); - } - } -} - -SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { - GIVEN("BoxDecompositionRoutingMethod") { - nlohmann::json j_rm; - j_rm["name"] = "BoxDecompositionRoutingMethod"; - BoxDecompositionRoutingMethod rm_loaded = - BoxDecompositionRoutingMethod::deserialize(j_rm); - nlohmann::json j_rm_serialised = rm_loaded.serialize(); - REQUIRE(j_rm == j_rm_serialised); - } - - GIVEN("BoxDecompositionRoutingMethod vector") { - nlohmann::json j_rms = { - {{"name", "BoxDecompositionRoutingMethod"}}, - { - {"name", "LexiRouteRoutingMethod"}, - {"depth", 3}, - }}; - std::vector rms = - j_rms.get>(); - nlohmann::json j_rms_serialised = rms; - REQUIRE(j_rms == j_rms_serialised); - } -} - -} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp index 9a82006a07..bb33f0f095 100644 --- a/tket/tests/test_MappingFrontier.cpp +++ b/tket/tests/test_MappingFrontier.cpp @@ -86,36 +86,6 @@ SCENARIO("Test MappingFrontier initialisation, advance_frontier_boundary.") { REQUIRE(mf.circuit_.source(e3) == v9); REQUIRE(mf.circuit_.target(e3) == v3); } - - GIVEN("A circuit with measurements and classically controlled operations") { - Circuit circ(3, 1); - std::vector qubits = circ.all_qubits(); - // All gates are physically permitted - Vertex v0 = circ.add_op(OpType::Measure, {0, 0}); - Vertex v1 = - circ.add_conditional_gate(OpType::Rx, {0.6}, {0}, {0}, 1); - Vertex v2 = - circ.add_conditional_gate(OpType::Rz, {0.6}, {1}, {0}, 1); - Vertex v3 = circ.add_op(OpType::X, {2}); - std::vector nodes = {Node(0), Node(1), Node(2)}; - - Architecture arc({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}}); - ArchitecturePtr shared_arc = std::make_shared(arc); - std::map rename_map = { - {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; - circ.rename_units(rename_map); - MappingFrontier mf(circ); - mf.advance_frontier_boundary(shared_arc); - VertPort vp0 = mf.quantum_boundary->get().find(nodes[0])->second; - VertPort vp1 = mf.quantum_boundary->get().find(nodes[1])->second; - VertPort vp2 = mf.quantum_boundary->get().find(nodes[2])->second; - Op_ptr op = circ.get_Op_ptr_from_Vertex(vp0.first); - Op_ptr op2 = circ.get_Op_ptr_from_Vertex(vp1.first); - Op_ptr op3 = circ.get_Op_ptr_from_Vertex(vp2.first); - REQUIRE(vp0.first == v1); - REQUIRE(vp1.first == v2); - REQUIRE(vp2.first == v3); - } } SCENARIO("Test MappingFrontier get_default_to_quantum_boundary_unit_map") { diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 2e8e4c099a..12117c2955 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -44,7 +44,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(shared_arc->valid_operation(nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -85,7 +85,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + 
REQUIRE(shared_arc->valid_operation(nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -131,7 +131,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(shared_arc->valid_operation(nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -178,7 +178,7 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(shared_arc->valid_operation(nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -220,10 +220,8 @@ SCENARIO("Reorder circuits with limited search space") { // Check only the first valid CZ get commuted to the front std::vector commands = circ.get_commands(); REQUIRE(shared_arc->valid_operation( - commands[0].get_op_ptr(), {Node(commands[0].get_args()[0]), Node(commands[0].get_args()[1])})); REQUIRE(!shared_arc->valid_operation( - commands[0].get_op_ptr(), {Node(commands[1].get_args()[0]), Node(commands[1].get_args()[1])})); const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -275,7 +273,7 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(shared_arc->valid_operation(nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -299,13 +297,13 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands2[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands2[i].get_op_ptr(), nodes)); + REQUIRE(shared_arc->valid_operation(nodes)); } std::vector nodes; for (auto arg : commands2[4].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(!shared_arc->valid_operation(commands2[4].get_op_ptr(), nodes)); + REQUIRE(!shared_arc->valid_operation(nodes)); const auto u2 = tket_sim::get_unitary(circ2); REQUIRE(tket_sim::compare_statevectors_or_unitaries( u2, u1, tket_sim::MatrixEquivalence::EQUAL)); @@ -360,7 +358,7 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { } } -SCENARIO("Test JSON serialisation for MultiGateReorderRoutingMethod") { +SCENARIO("Test JSON serialisation") { GIVEN("MultiGateReorderRoutingMethod") { nlohmann::json j_rm; j_rm["name"] = "MultiGateReorderRoutingMethod"; diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index 61766d8162..acc586497d 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -632,11 +632,10 @@ SCENARIO("Test compiler pass serializations") { nlohmann::json j_loaded = loaded; REQUIRE(j_pp == j_loaded); } - GIVEN("Routing with multiple routing methods") { + GIVEN("Routing with MultiGateReorderRoutingMethod") { RoutingMethodPtr mrmp = std::make_shared(60, 80); - RoutingMethodPtr brmp = std::make_shared(); - std::vector mrcon = {mrmp, rmp, brmp}; + std::vector mrcon = {mrmp, rmp}; Circuit circ = CircuitsForTesting::get().uccsd; CompilationUnit cu{circ}; PassPtr placement = gen_placement_pass(place); diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 1e40106386..4fcdd0481e 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ 
-95,7 +95,6 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_LexicographicalComparison.cpp ${TKET_TESTS_DIR}/test_LexiRoute.cpp ${TKET_TESTS_DIR}/test_MultiGateReorder.cpp - ${TKET_TESTS_DIR}/test_BoxDecompRoutingMethod.cpp ${TKET_TESTS_DIR}/test_DeviceCharacterisation.cpp ${TKET_TESTS_DIR}/test_Clifford.cpp ${TKET_TESTS_DIR}/test_MeasurementSetup.cpp From 650a189c7427fd3370b9c0c73dc7f27677d8a43c Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 14 Feb 2022 14:48:12 +0000 Subject: [PATCH 067/146] Address PR Requested changes --- pytket/binders/mapping.cpp | 3 +- tket/src/Architecture/Architecture.cpp | 22 +++---- tket/src/Architecture/CMakeLists.txt | 1 + .../include/Architecture/Architecture.hpp | 5 +- .../Graphs/include/Graphs/AbstractGraph.hpp | 5 ++ .../src/Mapping/LexicographicalComparison.cpp | 7 +-- tket/src/Mapping/MappingFrontier.cpp | 59 ++----------------- tket/src/Mapping/MappingManager.cpp | 3 - tket/src/Mapping/MultiGateReorder.cpp | 16 ++--- .../src/Mapping/include/Mapping/LexiRoute.hpp | 2 +- .../Mapping/LexicographicalComparison.hpp | 7 ++- .../include/Mapping/MappingFrontier.hpp | 8 --- .../include/Mapping/MappingManager.hpp | 2 - .../Mapping/include/Mapping/RoutingMethod.hpp | 2 +- tket/src/Placement/Qubit_Placement.cpp | 15 ++--- .../Placement/include/Placement/Placement.hpp | 5 +- tket/src/Predicates/PassGenerators.cpp | 2 +- tket/tests/test_CompilerPass.cpp | 1 - tket/tests/test_MultiGateReorder.cpp | 25 +++++--- 19 files changed, 68 insertions(+), 122 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index e1993cf5b3..0974165a74 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -76,7 +76,8 @@ PYBIND11_MODULE(mapping, m) { m, "MappingManager", "Defined by a pytket Architecture object, maps Circuit logical Qubits " "to Physically permitted Architecture qubits. Mapping is completed by " - "sequential routing (full or partial) of subcircuits. A custom method for " + "sequential routing (full or partial) of subcircuits. A custom method " + "for " "routing (full or partial) of subcircuits can be defined in Python.") .def( py::init(), diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index 479f734f08..f493b4166a 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -27,24 +27,20 @@ namespace tket { // basic implementation that works off same prior assumptions // TODO: Update this for more mature systems of multi-qubit gates bool Architecture::valid_operation( - /*const OpType& optype, */ const std::vector& uids) const { - if (uids.size() == - 1) { // TODO: for simple case here this should probably not pass if - // node_exists[uids[0]] == FALSE, but should be fine for now? 
+ const OpType& optype, const std::vector& uids) const { + if (uids.size() == 1) { + // with current Architecture can assume all single qubit gates valid + return true; + } else if (optype == OpType::Barrier) { return true; } else if (uids.size() == 2) { if (this->node_exists(uids[0]) && this->node_exists(uids[1]) && - (this->edge_exists(uids[0], uids[1]) || - this->edge_exists(uids[1], uids[0]))) { + this->bidirectional_edge_exists(uids[0], uids[1])) { return true; } - } else if (uids.size() == 3) { - bool con_0_exists = - (this->edge_exists(uids[0], uids[1]) || - this->edge_exists(uids[1], uids[0])); - bool con_1_exists = - (this->edge_exists(uids[2], uids[1]) || - this->edge_exists(uids[1], uids[2])); + } else if (uids.size() == 3 && optype == OpType::BRIDGE) { + bool con_0_exists = this->bidirectional_edge_exists(uids[0], uids[1]); + bool con_1_exists = this->bidirectional_edge_exists(uids[2], uids[1]); if (this->node_exists(uids[0]) && this->node_exists(uids[1]) && this->node_exists(uids[2]) && con_0_exists && con_1_exists) { return true; diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index 30f3b33135..ead73a0507 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -24,6 +24,7 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Graphs + OpType Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index b7c3d975d0..348ad33294 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -23,6 +23,7 @@ #include "Graphs/CompleteGraph.hpp" #include "Graphs/DirectedGraph.hpp" +#include "OpType/OpType.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/Json.hpp" @@ -103,7 +104,7 @@ class Architecture : public ArchitectureBase> { node_set_t get_articulation_points(const Architecture &subarc) const; bool valid_operation( - /*const OpType& optype, */ const std::vector &uids) const; + const OpType &optype, const std::vector &uids) const; /** * Sub-architecture generated by a subset of nodes. 
@@ -203,7 +204,7 @@ class SquareGrid : public Architecture { unsigned layers; }; -typedef std::shared_ptr ArchitecturePtr; +typedef std::shared_ptr ArchitecturePtr; int tri_lexicographical_comparison( const dist_vec &dist1, const dist_vec &dist2); diff --git a/tket/src/Graphs/include/Graphs/AbstractGraph.hpp b/tket/src/Graphs/include/Graphs/AbstractGraph.hpp index 4b27796872..b5f6373073 100644 --- a/tket/src/Graphs/include/Graphs/AbstractGraph.hpp +++ b/tket/src/Graphs/include/Graphs/AbstractGraph.hpp @@ -53,6 +53,11 @@ class AbstractGraph { /** Check if an edge exists between two nodes */ virtual bool edge_exists(const T &node1, const T &node2) const = 0; + /** Check if an edge exists between two nodes */ + bool bidirectional_edge_exists(const T &node1, const T &node2) const { + return (edge_exists(node1, node2) || edge_exists(node2, node1)); + } + /** Check if a node exists */ bool node_exists(const T &node) const { return nodes_.contains(node); } diff --git a/tket/src/Mapping/LexicographicalComparison.cpp b/tket/src/Mapping/LexicographicalComparison.cpp index a221baa09f..4789349019 100644 --- a/tket/src/Mapping/LexicographicalComparison.cpp +++ b/tket/src/Mapping/LexicographicalComparison.cpp @@ -16,10 +16,6 @@ namespace tket { -/** - * Assumes all node in interacting_nodes in architecture, and ignores if they - * aren't maybe throw error instead? - */ LexicographicalComparison::LexicographicalComparison( const ArchitecturePtr& _architecture, const interacting_nodes_t& _interacting_nodes) @@ -54,8 +50,7 @@ void LexicographicalComparison::increment_distances( if (distances[distances_index] == 0 && increment < 0) { throw LexicographicalComparisonError( "Negative increment value is larger than value held at index, " - "modification not " - "allowed."); + "modification not allowed."); } distances[distances_index] += increment; } diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 86fd62bc8c..ca719efee1 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -15,6 +15,8 @@ #include "Mapping/MappingFrontier.hpp" #include "Circuit/Circuit.hpp" +#include "Utils/UnitID.hpp" + namespace tket { /** @@ -252,9 +254,7 @@ void MappingFrontier::advance_frontier_boundary( nodes.push_back(Node(uid)); } if (architecture->valid_operation( - /* this->circuit_.get_OpType_from_Vertex(vert), */ - nodes) || - this->circuit_.get_OpType_from_Vertex(vert) == OpType::Barrier) { + this->circuit_.get_OpType_from_Vertex(vert), nodes)) { // if no valid operation, boundary not updated and while loop terminates boundary_updated = true; for (const UnitID& uid : uids) { @@ -349,8 +349,6 @@ void MappingFrontier::update_quantum_boundary_uids( } } -// TODO: expects every qubit is present in permutation, even if unmoved -// TODO: should this also permute final map compared to initial map void MappingFrontier::permute_subcircuit_q_out_hole( const unit_map_t& final_permutation, Subcircuit& subcircuit) { EdgeVec new_q_out_hole; @@ -492,7 +490,7 @@ void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { std::map final_map = {{n0, n1}, {n1, n0}}; - this->update_final_map(final_map); + update_maps(this->bimaps_, {}, final_map); } void MappingFrontier::add_bridge( @@ -545,8 +543,7 @@ void MappingFrontier::add_ancilla(const UnitID& ancilla) { unit_map_t update_map; update_map.insert({uid_ancilla, uid_ancilla}); - this->update_initial_map(update_map); - this->update_final_map(update_map); + update_maps(this->bimaps_, update_map, 
update_map); } void MappingFrontier::merge_ancilla( @@ -599,50 +596,4 @@ void MappingFrontier::merge_ancilla( this->bimaps_->final.left.erase(merge); } -template -void MappingFrontier::update_initial_map(const std::map& qm) { - // Can only work for Unit classes - static_assert(std::is_base_of::value); - static_assert(std::is_base_of::value); - // Unit types must be related, so cannot rename e.g. Bits to Qubits - static_assert( - std::is_base_of::value || - std::is_base_of::value); - unit_map_t new_initial_map; - for (const std::pair& pair : qm) { - const auto& it = this->bimaps_->initial.right.find(pair.first); - if (it == this->bimaps_->initial.right.end()) { - continue; - } - new_initial_map.insert({it->second, pair.second}); - this->bimaps_->initial.right.erase(pair.first); - } - for (const std::pair& pair : new_initial_map) { - this->bimaps_->initial.left.insert(pair); - } -} - -template -void MappingFrontier::update_final_map(const std::map& qm) { - // Can only work for Unit classes - static_assert(std::is_base_of::value); - static_assert(std::is_base_of::value); - // Unit types must be related, so cannot rename e.g. Bits to Qubits - static_assert( - std::is_base_of::value || - std::is_base_of::value); - unit_map_t new_final_map; - for (const std::pair& pair : qm) { - const auto& it = this->bimaps_->final.right.find(pair.first); - if (it == this->bimaps_->final.right.end()) { - continue; - } - new_final_map.insert({it->second, pair.second}); - this->bimaps_->final.right.erase(pair.first); - } - for (const std::pair& pair : new_final_map) { - this->bimaps_->final.left.insert(pair); - } -} - } // namespace tket diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 6896c57f9e..3d95cfa573 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -31,9 +31,6 @@ bool MappingManager::route_circuit( bool MappingManager::route_circuit_with_maps( Circuit& circuit, const std::vector& routing_methods, std::shared_ptr maps) const { - // Assumption; Routing can not route a circuit - // with more logical qubits than an Architecture has - // physical qubits physically permitted if (circuit.n_qubits() > this->architecture_->n_nodes()) { std::string error_string = "Circuit has" + std::to_string(circuit.n_qubits()) + diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index c05ea06122..7065c15e75 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -30,7 +30,7 @@ MultiGateReorder::MultiGateReorder( // Traverse the DAG to the quantum frontier // to find the UnitID associated with an VertPort -UnitID get_unitid_from_vertex_port( +static UnitID get_unitid_from_vertex_port( const std::shared_ptr &frontier, const VertPort &vert_port) { VertPort current_vert_port = vert_port; @@ -50,7 +50,7 @@ UnitID get_unitid_from_vertex_port( } } -bool is_multiq_quantum_gate(const Circuit &circ, const Vertex &vert) { +static bool is_multiq_quantum_gate(const Circuit &circ, const Vertex &vert) { Op_ptr op = circ.get_Op_ptr_from_Vertex(vert); return ( op->get_desc().is_gate() && circ.n_in_edges(vert) > 1 && @@ -60,19 +60,19 @@ bool is_multiq_quantum_gate(const Circuit &circ, const Vertex &vert) { circ.n_out_edges(vert)); } -bool is_physically_permitted( +static bool is_physically_permitted( const std::shared_ptr &frontier, const ArchitecturePtr &arc_ptr, const Vertex &vert) { std::vector nodes; for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { 
nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); } - - return arc_ptr->valid_operation(nodes); + return arc_ptr->valid_operation( + frontier->circuit_.get_OpType_from_Vertex(vert), nodes); } // This method will try to commute a vertex to the quantum frontier -std::optional> try_find_commute_edges( +static std::optional> try_find_commute_edges( const Circuit &circ, const EdgeVec &frontier_edges, const Vertex &vert) { // Initialize to be the in_edges for the given vertex EdgeVec current_edges = circ.get_in_edges(vert); @@ -133,7 +133,7 @@ std::optional> try_find_commute_edges( } } -void partial_rewire( +static void partial_rewire( const Vertex &vert, Circuit &circ, EdgeVec &src_edges, EdgeVec &dest_edges) { // move the vertex to the frontier @@ -231,7 +231,7 @@ void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { MultiGateReorderRoutingMethod::MultiGateReorderRoutingMethod( unsigned _max_depth, unsigned _max_size) - : max_depth_(_max_depth), max_size_(_max_size){}; + : max_depth_(_max_depth), max_size_(_max_size) {} bool MultiGateReorderRoutingMethod::check_method( const std::shared_ptr &mapping_frontier, diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp index caada4ba71..ad39ca26cd 100644 --- a/tket/src/Mapping/include/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -180,7 +180,7 @@ class LexiRouteRoutingMethod : public RoutingMethod { * @param mapping_frontier Contains boundary of routed/unrouted circuit for * modifying * @param architecture Architecture providing physical constraints - * @return Logical to Physical mapping at boundary due to modification. + * @return Map between relabelled Qubit, always empty. * */ unit_map_t routing_method( diff --git a/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp b/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp index 8911340ab2..f597ad4df4 100644 --- a/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp +++ b/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp @@ -65,9 +65,14 @@ class LexicographicalComparison { const std::pair& interaction, int increment) const; /** - * Getter for lexicographical_distances_ attribute + * Returns a held lexicographically ordered vector of distances between nodes + * and architecture class object is constructed from, with changes + * from increment distances. + * + * @return Lexicographically ordered distance vector */ lexicographical_distances_t get_lexicographical_distances() const; + /** * Takes a copy of Distance vector held in object and modifies it to reflect * how distance between pairs of interacting nodes in attribute would change diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp index 0554ae7c6d..5fdbc695fc 100644 --- a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -43,8 +43,6 @@ std::shared_ptr frontier_convert_vertport_to_edge( * convert_u_frontier_to_edges * Subcircuit requires EdgeVec, not unit_frontier_t as boundary information * Helper Functions to convert types - * TODO: also probably another way of doing this? EdgeVec required for - * subcircuit. Double check with someone who knows better than I...
*/ EdgeVec convert_u_frontier_to_edges(const unit_frontier_t& u_frontier); struct MappingFrontier { @@ -181,12 +179,6 @@ struct MappingFrontier { * @param new_boundary Object to reassign with. */ void set_quantum_boundary(const unit_vertport_frontier_t& new_boundary); - - template - void update_initial_map(const std::map& qm); - - template - void update_final_map(const std::map& qm); }; } // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MappingManager.hpp b/tket/src/Mapping/include/Mapping/MappingManager.hpp index 09f342ee4a..4a02e85d39 100644 --- a/tket/src/Mapping/include/Mapping/MappingManager.hpp +++ b/tket/src/Mapping/include/Mapping/MappingManager.hpp @@ -28,8 +28,6 @@ class MappingManagerError : public std::logic_error { : std::logic_error(message) {} }; -typedef ArchitecturePtr ArchitecturePtr; - class MappingManager { public: /* Mapping Manager Constructor */ diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index 7dc4d7d344..23041e2105 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -68,6 +68,6 @@ class RoutingMethod { } }; -typedef std::shared_ptr RoutingMethodPtr; +typedef std::shared_ptr RoutingMethodPtr; } // namespace tket \ No newline at end of file diff --git a/tket/src/Placement/Qubit_Placement.cpp b/tket/src/Placement/Qubit_Placement.cpp index 100073f0b9..d3c324edc2 100644 --- a/tket/src/Placement/Qubit_Placement.cpp +++ b/tket/src/Placement/Qubit_Placement.cpp @@ -38,12 +38,9 @@ std::set interacting_qbs(const Circuit& circ) { } PlacementFrontier::PlacementFrontier(const Circuit& _circ) : circ(_circ) { - init(); -} -void PlacementFrontier::init() { VertexVec input_slice; quantum_in_edges = std::make_shared(); - classical_in_edges = std::make_shared(); + boolean_in_edges = std::make_shared(); for (const Qubit& qb : circ.all_qubits()) { Vertex input = circ.get_in(qb); @@ -54,28 +51,28 @@ void PlacementFrontier::init() { for (const Bit& bit : circ.all_bits()) { Vertex input = circ.get_in(bit); EdgeVec candidates = circ.get_nth_b_out_bundle(input, 0); - classical_in_edges->insert({bit, candidates}); + boolean_in_edges->insert({bit, candidates}); } - CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); + CutFrontier next_cut = circ.next_cut(quantum_in_edges, boolean_in_edges); slice = next_cut.slice; quantum_out_edges = next_cut.u_frontier; } void PlacementFrontier::next_slicefrontier() { quantum_in_edges = std::make_shared(); - classical_in_edges = std::make_shared(); + boolean_in_edges = std::make_shared(); for (const std::pair& pair : quantum_out_edges->get()) { Edge new_e = circ.skip_irrelevant_edges(pair.second); quantum_in_edges->insert({pair.first, new_e}); Vertex targ = circ.target(new_e); EdgeVec targ_classical_ins = circ.get_in_edges_of_type(targ, EdgeType::Boolean); - classical_in_edges->insert( + boolean_in_edges->insert( {Bit("frontier_bit", pair.first.index()), targ_classical_ins}); } - CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); + CutFrontier next_cut = circ.next_cut(quantum_in_edges, boolean_in_edges); slice = next_cut.slice; quantum_out_edges = next_cut.u_frontier; } diff --git a/tket/src/Placement/include/Placement/Placement.hpp b/tket/src/Placement/include/Placement/Placement.hpp index 0026361034..e567c0042b 100644 --- a/tket/src/Placement/include/Placement/Placement.hpp +++ 
b/tket/src/Placement/include/Placement/Placement.hpp @@ -115,14 +115,13 @@ struct PlacementFrontier { std::shared_ptr quantum_out_edges; // Boolean edges coming in to vertices in slice. Guarantees that all edges // into every vertex in slice is represented in next_cut - std::shared_ptr classical_in_edges; + std::shared_ptr boolean_in_edges; // reference to circuit that it acts on const Circuit& circ; - explicit PlacementFrontier(const Circuit& _circ); // initialise at front of circuit - void init(); + explicit PlacementFrontier(const Circuit& _circ); // move to next slice void next_slicefrontier(); }; diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index d54f752be8..93eef00cce 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -199,7 +199,7 @@ PassPtr gen_default_mapping_pass(const Architecture& arc) { PassPtr gen_cx_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const std::vector>& config, bool directed_cx, + const std::vector& config, bool directed_cx, bool delay_measures) { PassPtr rebase_pass = gen_rebase_pass( {OpType::CX}, CircPool::CX(), all_single_qubit_types(), diff --git a/tket/tests/test_CompilerPass.cpp b/tket/tests/test_CompilerPass.cpp index 403a2be219..7342b4820a 100644 --- a/tket/tests/test_CompilerPass.cpp +++ b/tket/tests/test_CompilerPass.cpp @@ -1017,7 +1017,6 @@ SCENARIO("Commute measurements to the end of a circuit") { Command final_command = cu.get_circ_ref().get_commands()[7]; OpType type = final_command.get_op_ptr()->get_type(); REQUIRE(type == OpType::Measure); - std::cout << cu.get_circ_ref() << std::endl; REQUIRE(final_command.get_args().front() == Node(3)); } } diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 12117c2955..1409ce23e3 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -44,7 +44,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation( + commands[i].get_op_ptr()->get_type(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -85,7 +86,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation( + commands[i].get_op_ptr()->get_type(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -131,7 +133,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation( + commands[i].get_op_ptr()->get_type(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -147,7 +150,7 @@ SCENARIO("Reorder circuits") { // Physically invalid operations circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); // Physically valid operations - circ.add_op(OpType::CCX, {qubits[1], qubits[2], qubits[3]}); + circ.add_op(OpType::BRIDGE, {qubits[1], qubits[2], qubits[3]}); circ.add_op(OpType::Rx, 0.5, {qubits[3]}); circ.add_op(OpType::CX, {qubits[2], qubits[3]}); circ.add_op(OpType::Rz, 0.5, {qubits[0]}); @@ -178,7 +181,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { 
nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation( + commands[i].get_op_ptr()->get_type(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -220,8 +224,10 @@ SCENARIO("Reorder circuits with limited search space") { // Check only the first valid CZ get commuted to the front std::vector commands = circ.get_commands(); REQUIRE(shared_arc->valid_operation( + commands[0].get_op_ptr()->get_type(), {Node(commands[0].get_args()[0]), Node(commands[0].get_args()[1])})); REQUIRE(!shared_arc->valid_operation( + commands[1].get_op_ptr()->get_type(), {Node(commands[1].get_args()[0]), Node(commands[1].get_args()[1])})); const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -273,7 +279,8 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation( + commands[i].get_op_ptr()->get_type(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -297,13 +304,15 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands2[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(nodes)); + REQUIRE(shared_arc->valid_operation( + commands2[i].get_op_ptr()->get_type(), nodes)); } std::vector nodes; for (auto arg : commands2[4].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(!shared_arc->valid_operation(nodes)); + REQUIRE(!shared_arc->valid_operation( + commands2[4].get_op_ptr()->get_type(), nodes)); const auto u2 = tket_sim::get_unitary(circ2); REQUIRE(tket_sim::compare_statevectors_or_unitaries( u2, u1, tket_sim::MatrixEquivalence::EQUAL)); From 99f3c615aa23d41a52ef625870d18802b58b175f Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 14 Feb 2022 14:58:18 +0000 Subject: [PATCH 068/146] dummy pythonfile --- kyriakos.py | 122 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 kyriakos.py diff --git a/kyriakos.py b/kyriakos.py new file mode 100644 index 0000000000..ec7927a55b --- /dev/null +++ b/kyriakos.py @@ -0,0 +1,122 @@ + + +from pytket import Circuit +from pytket.predicates import CompilationUnit + +circ_dict = {'bits': [['c', [0]], + ['c', [1]], + ['c', [2]], + ['c', [3]], + ['c', [4]], + ['c', [5]], + ['tk_SCRATCH_BIT', [0]], + ['tk_SCRATCH_BIT', [1]], + ['tk_SCRATCH_BIT', [2]]], + 'commands': [{'args': [['q', [1]], ['q', [3]]], 'op': {'type': 'CZ'}}, + {'args': [['q', [1]], ['q', [2]]], 'op': {'type': 'CZ'}}, + {'args': [['q', [4]], ['q', [3]]], 'op': {'type': 'CZ'}}, + {'args': [['q', [1]], ['q', [0]]], 'op': {'type': 'CZ'}}, + {'args': [['q', [0]], + ['q', [1]], + ['q', [2]], + ['q', [3]], + ['q', [4]], + ['q', [5]], + ['c', [0]], + ['c', [1]], + ['c', [2]], + ['c', [3]], + ['c', [4]], + ['c', [5]]], + 'op': {'signature': ['Q', + 'Q', + 'Q', + 'Q', + 'Q', + 'Q', + 'C', + 'C', + 'C', + 'C', + 'C', + 'C'], + 'type': 'Barrier'}}, + {'args': [['q', [0]]], 'op': {'type': 'H'}}, + {'args': [['q', [1]]], 'op': {'type': 'H'}}, + {'args': [['q', [0]], ['c', [0]]], 'op': {'type': 'Measure'}}, + {'args': [['q', [1]], ['c', [1]]], 'op': {'type': 'Measure'}}, + {'args': [['q', [0]], + ['q', [1]], + ['q', [2]], + ['q', [3]], + ['q', [4]], + ['c', [0]], + ['c', [1]], + ['c', [2]], + ['c', [3]], + ['c', [4]]], + 'op': {'signature': ['Q', 'Q', 'Q', 'Q', 
'Q', 'C', 'C', 'C', 'C', 'C'], + 'type': 'Barrier'}}, + {'args': [['q', [0]]], 'op': {'type': 'Reset'}}, + {'args': [['q', [1]]], 'op': {'type': 'Reset'}}, + {'args': [['q', [4]]], 'op': {'params': ['-0.25'], 'type': 'Rz'}}, + {'args': [['c', [1]], ['tk_SCRATCH_BIT', [0]]], + 'op': {'box': {'exp': {'args': [['c', [1]], False], 'op': 'BitWiseOp.XOR'}, + 'id': '12c10add-5033-437b-b911-f939f97203ed', + 'n_i': 1, + 'n_io': 0, + 'n_o': 1, + 'type': 'ClassicalExpBox'}, + 'type': 'ClassicalExpBox'}}, + {'args': [['c', [0]], ['tk_SCRATCH_BIT', [1]]], + 'op': {'box': {'exp': {'args': [['c', [0]], False], 'op': 'BitWiseOp.XOR'}, + 'id': '7d9e1fc7-dac1-4c52-8202-c480ef1897e0', + 'n_i': 1, + 'n_io': 0, + 'n_o': 1, + 'type': 'ClassicalExpBox'}, + 'type': 'ClassicalExpBox'}}, + {'args': [['c', [0]], ['tk_SCRATCH_BIT', [2]]], + 'op': {'box': {'exp': {'args': [['c', [0]], False], 'op': 'BitWiseOp.XOR'}, + 'id': '319a085c-42b6-4aa7-8348-cd588f6aa3f5', + 'n_i': 1, + 'n_io': 0, + 'n_o': 1, + 'type': 'ClassicalExpBox'}, + 'type': 'ClassicalExpBox'}}, + {'args': [['tk_SCRATCH_BIT', [0]], ['q', [2]]], + 'op': {'conditional': {'op': {'type': 'X'}, 'value': 1, 'width': 1}, + 'type': 'Conditional'}}, + {'args': [['tk_SCRATCH_BIT', [2]], ['q', [3]]], + 'op': {'conditional': {'op': {'type': 'Z'}, 'value': 1, 'width': 1}, + 'type': 'Conditional'}}, + {'args': [['tk_SCRATCH_BIT', [1]], ['q', [2]]], + 'op': {'conditional': {'op': {'type': 'Z'}, 'value': 1, 'width': 1}, + 'type': 'Conditional'}}, + {'args': [['q', [3]]], 'op': {'params': ['-0.25'], 'type': 'Rz'}}, + {'args': [['q', [2]]], 'op': {'params': ['-0.25'], 'type': 'Rz'}}], + 'implicit_permutation': [[['q', [0]], ['q', [0]]], + [['q', [1]], ['q', [1]]], + [['q', [2]], ['q', [2]]], + [['q', [3]], ['q', [3]]], + [['q', [4]], ['q', [4]]], + [['q', [5]], ['q', [5]]]], + 'phase': '0.0', + 'qubits': [['q', [0]], + ['q', [1]], + ['q', [2]], + ['q', [3]], + ['q', [4]], + ['q', [5]]]} + + +circ = Circuit.from_dict(circ_dict) + +cu = CompilationUnit(circ) +print(cu) + +from pytket.passes import FullMappingPass, RoutingPass, DefaultMappingPass +from pytket.architecture import SquareGrid + +DefaultMappingPass(SquareGrid(4,4)).apply(cu) +print(cu.circuit) \ No newline at end of file From 0e742a8f0d7887b7c78e4f85b6e2c56887cc3295 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 16:21:33 +0000 Subject: [PATCH 069/146] change copyright to 2022 --- tket/src/TokenSwapping/ArchitectureMapping.cpp | 2 +- tket/src/TokenSwapping/CyclesGrowthManager.cpp | 2 +- tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp | 2 +- tket/src/TokenSwapping/DistancesFromArchitecture.cpp | 2 +- tket/src/TokenSwapping/DistancesInterface.cpp | 2 +- tket/src/TokenSwapping/DynamicTokenTracker.cpp | 2 +- tket/src/TokenSwapping/NeighboursFromArchitecture.cpp | 2 +- tket/src/TokenSwapping/NeighboursInterface.cpp | 2 +- tket/src/TokenSwapping/PartialTsaInterface.cpp | 2 +- tket/src/TokenSwapping/RNG.cpp | 2 +- tket/src/TokenSwapping/SwapListOptimiser.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp | 2 +- tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp | 2 +- tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp | 3 +-- tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp | 2 +- tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp | 3 +-- tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp | 2 +- 
.../src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp | 2 +- tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp | 2 +- tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp | 2 +- tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp | 2 +- tket/src/TokenSwapping/VectorListHybridSkeleton.cpp | 2 +- .../include/TokenSwapping/ArchitectureMapping.hpp | 2 +- .../include/TokenSwapping/CanonicalRelabelling.hpp | 2 +- .../include/TokenSwapping/CyclesGrowthManager.hpp | 2 +- .../include/TokenSwapping/DistancesFromArchitecture.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/DistancesInterface.hpp | 2 +- .../include/TokenSwapping/DynamicTokenTracker.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp | 2 +- .../include/TokenSwapping/NeighboursFromArchitecture.hpp | 2 +- .../include/TokenSwapping/NeighboursInterface.hpp | 2 +- .../include/TokenSwapping/PartialMappingLookup.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp | 2 +- .../src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp | 2 +- tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp | 2 +- .../include/TokenSwapping/SwapListSegmentOptimiser.hpp | 2 +- .../include/TokenSwapping/SwapListTableOptimiser.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp | 2 +- .../include/TokenSwapping/VectorListHybridSkeleton.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp | 2 +- .../include/TokenSwapping/VertexMappingFunctions.hpp | 2 +- .../TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp | 2 +- .../include/TokenSwapping/main_entry_functions.hpp | 2 +- tket/src/TokenSwapping/main_entry_functions.cpp | 2 +- tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp | 2 +- tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp | 2 +- tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp | 2 +- tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp | 2 +- tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp | 2 +- tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp | 2 +- tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp | 2 +- tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp | 2 +- .../TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp | 2 +- .../TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp | 2 +- .../TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp | 2 +- .../TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp | 2 +- .../TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp | 2 +- .../tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp | 2 +- .../TestUtils/ArchitectureEdgesReimplementation.cpp | 2 +- .../TestUtils/ArchitectureEdgesReimplementation.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp | 2 +- tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp | 2 +- 
tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp | 2 +- tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp | 2 +- tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp | 2 +- tket/tests/TokenSwapping/test_SwapList.cpp | 2 +- tket/tests/TokenSwapping/test_VectorListHybrid.cpp | 2 +- tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp | 2 +- tket/tests/TokenSwapping/test_main_entry_functions.cpp | 2 +- 81 files changed, 81 insertions(+), 83 deletions(-) diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 7f6e08dbfc..8011d3917a 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp index 894494e5d0..2797924868 100644 --- a/tket/src/TokenSwapping/CyclesGrowthManager.cpp +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp index f5fe4a0050..88ff1228e6 100644 --- a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp index e8c629c87c..3ae327ceda 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwapping/DistancesFromArchitecture.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/DistancesInterface.cpp b/tket/src/TokenSwapping/DistancesInterface.cpp index 35363c7505..a55d6b1f3f 100644 --- a/tket/src/TokenSwapping/DistancesInterface.cpp +++ b/tket/src/TokenSwapping/DistancesInterface.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.cpp b/tket/src/TokenSwapping/DynamicTokenTracker.cpp index fe0e1dc234..18c65ba2ff 100644 --- a/tket/src/TokenSwapping/DynamicTokenTracker.cpp +++ b/tket/src/TokenSwapping/DynamicTokenTracker.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp index d93cfc8b13..57bc21724a 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/NeighboursInterface.cpp b/tket/src/TokenSwapping/NeighboursInterface.cpp index 7a5773e33c..805ffa02f7 100644 --- a/tket/src/TokenSwapping/NeighboursInterface.cpp +++ b/tket/src/TokenSwapping/NeighboursInterface.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/PartialTsaInterface.cpp b/tket/src/TokenSwapping/PartialTsaInterface.cpp index afac5357dd..f80248db72 100644 --- a/tket/src/TokenSwapping/PartialTsaInterface.cpp +++ b/tket/src/TokenSwapping/PartialTsaInterface.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/RNG.cpp b/tket/src/TokenSwapping/RNG.cpp index ab2c48e70d..daa3c05e4a 100644 --- a/tket/src/TokenSwapping/RNG.cpp +++ b/tket/src/TokenSwapping/RNG.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp index 42773437ce..eddf3ae63f 100644 --- a/tket/src/TokenSwapping/SwapListOptimiser.cpp +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp index dd75e1c98f..97041c0131 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp index 282b2efd36..870e51ed75 100644 --- a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index 8b812d53eb..c5445774cd 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp index 51c1c65fcc..b3ea011b44 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp index 1a58535764..558f3b712f 100644 --- a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp index 9b75744ae6..19a0a14c38 100644 --- a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp index 39fb0ccbdc..6960cf1b61 100644 --- a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,7 +20,6 @@ #include "TokenSwapping/SwapSequenceTable.hpp" #include "Utils/Assert.hpp" -; using std::vector; namespace tket { diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp index cc7c6a5dcb..d5a0239f4a 100644 --- a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp index 3a36792882..3c72369ce4 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp index 21a2597558..3687a546c1 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp index 7060995b1b..ba5d662c9e 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp index 24cd00adb2..201ad7e539 100644 --- a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index b251f21244..b0a736b7b3 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp index dfe579bff4..44f006a555 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp index d509b5bcaa..98b37bf9b7 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp index 768c6b902e..68549cde8a 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp index 718b81d1e9..7f2dc0834d 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp index fa3488ba18..8f7f1a1063 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp index bc6e7ab8e1..c4be18d7c3 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp index dc3b1b8e72..ede9d94fca 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp index 5f36a4f7d8..e4abcaae33 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp index e32b531ebc..5a32f3cb5d 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp index 371552a37d..fe7be4387d 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp index deaa7dda72..ce2c8b5911 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp @@ -1,5 +1,5 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp index ac6fc7c73a..99f93d0e2b 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp index 58868ef21c..40cdde8425 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp index b8fe8063d5..ac74509b68 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp index c3ce30d5ff..6df1b15cd2 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListOptimiser.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp index 2d180e06ed..87b5c72d31 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp @@ -1,5 +1,5 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp index ac815c2f7c..7a7532dd9a 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp @@ -1,5 +1,5 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp index 6595f46c6b..d043249950 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp index 2bf9649ca7..e8b3b64fde 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp index b8dd971001..d1e8677b42 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp index 752040e3b5..de78dae7e1 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp index 441678c4d4..a8742a2670 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp index 14a1c7d4c6..c5a776bf19 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp index 7390632a06..12507f2463 100644 --- a/tket/src/TokenSwapping/main_entry_functions.cpp +++ b/tket/src/TokenSwapping/main_entry_functions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp index e1f06aee30..4ed7727aa1 100644 --- a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp index 9681980374..6b9222a971 100644 --- a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp index 7a1a339b94..87ef4a4953 100644 --- a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp index a4846faad8..ab28fca9f9 100644 --- a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp index ca4eea1c4f..c3f27050b2 100644 --- a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp index 504360f9f0..7cedbb075d 100644 --- a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp index 5c139c697e..c3a9692121 100644 --- a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp index 3b452ae42a..abd4532ce8 100644 --- a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp index 74e4a27ade..67e64f3bcb 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp index 2adcf5f6d0..1ba570a2d1 100644 --- a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp index da96d908c1..6e0fa8f2da 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp index c169f84058..3af1da4756 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp index e4818c14d2..11242ea2fc 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp index 6bc7ed97fa..d73070888d 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp index 87e33d2595..9b94b9a7a8 100644 --- a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp index b6ddcd8f39..b730ad7bd2 100644 --- a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp index ab7c31886b..23727c7107 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp index 65c142b932..6afa674f0b 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp index a489add6e7..569243fd02 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp index 7a10b40fa2..fa7d139570 100644 --- a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index 730ba812b8..bb93aa43e2 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp index f51cea6763..2f6da5ceeb 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp index a8043b5a0d..2e81c5c82b 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp index 4cff62e020..7f45186ba7 100644 --- a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp index 182790a92b..efaaedd429 100644 --- a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp index 1980c5b9f9..431875618c 100644 --- a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp +++ b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp index 21a074d24d..6fedd4845d 100644 --- a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp index 648d220594..cf8d06d9f7 100644 --- a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp +++ b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 239e14d9be..446fc39e74 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_SwapList.cpp b/tket/tests/TokenSwapping/test_SwapList.cpp index 4769d470b5..184f24ca32 100644 --- a/tket/tests/TokenSwapping/test_SwapList.cpp +++ b/tket/tests/TokenSwapping/test_SwapList.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp index 238b5e38a6..9ae0d81a4f 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp index d959935366..57e09601f9 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tket/tests/TokenSwapping/test_main_entry_functions.cpp b/tket/tests/TokenSwapping/test_main_entry_functions.cpp index a0a110a0f6..59d31a27de 100644 --- a/tket/tests/TokenSwapping/test_main_entry_functions.cpp +++ b/tket/tests/TokenSwapping/test_main_entry_functions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. From ca30a508c5634fe0c18c223828b8a9452a87f4f5 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 16:27:46 +0000 Subject: [PATCH 070/146] remove unused code, including PathFinderInterface --- tket/src/TokenSwapping/BestFullTsa.cpp | 7 +- .../TokenSwapping/CyclesCandidateManager.cpp | 6 +- tket/src/TokenSwapping/CyclesPartialTsa.cpp | 17 +--- .../src/TokenSwapping/PathFinderInterface.cpp | 43 ---------- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 8 +- tket/src/TokenSwapping/TrivialTSA.cpp | 11 ++- .../TokenSwapping/CyclesCandidateManager.hpp | 5 +- .../TokenSwapping/CyclesPartialTsa.hpp | 18 +--- .../TokenSwapping/PartialTsaInterface.hpp | 6 +- .../TokenSwapping/PathFinderInterface.hpp | 86 ------------------- .../TokenSwapping/RiverFlowPathFinder.hpp | 52 +++++------ .../include/TokenSwapping/TrivialTSA.hpp | 10 +-- 12 files changed, 50 insertions(+), 219 deletions(-) delete mode 100644 tket/src/TokenSwapping/PathFinderInterface.cpp delete mode 100644 tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp index 30c30bbaaa..fafce59fca 100644 --- a/tket/src/TokenSwapping/BestFullTsa.cpp +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,9 +24,6 @@ namespace tsa_internal { BestFullTsa::BestFullTsa() { m_name = "BestFullTsa"; } -// HybridTsa00& BestFullTsa::get_hybrid_tsa_for_testing() { return m_hybrid_tsa; -// } - void BestFullTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, const ArchitectureMapping& arch_mapping) { @@ -41,7 +38,7 @@ void BestFullTsa::append_partial_solution( void BestFullTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, - PathFinderInterface& path_finder) { + RiverFlowPathFinder& path_finder) { auto vm_copy = vertex_mapping; m_hybrid_tsa.append_partial_solution( diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp index 13a5698a64..fdf52a2286 100644 --- a/tket/src/TokenSwapping/CyclesCandidateManager.cpp +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -26,10 +26,6 @@ using std::vector; namespace tket { namespace tsa_internal { -CyclesCandidateManager::Options& CyclesCandidateManager::get_options() { - return m_options; -} - size_t CyclesCandidateManager::fill_initial_cycle_ids(const Cycles& cycles) { m_cycle_with_vertex_hash.clear(); m_cycles_to_keep.clear(); diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.cpp b/tket/src/TokenSwapping/CyclesPartialTsa.cpp index 046488f18d..d6cf13f523 100644 --- a/tket/src/TokenSwapping/CyclesPartialTsa.cpp +++ b/tket/src/TokenSwapping/CyclesPartialTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,20 +23,10 @@ namespace tsa_internal { CyclesPartialTsa::CyclesPartialTsa() { m_name = "Cycles"; } -// GCOVR_EXCL_START -CyclesGrowthManager::Options& CyclesPartialTsa::growth_options() { - return m_growth_manager.get_options(); -} - -CyclesCandidateManager::Options& CyclesPartialTsa::candidate_options() { - return m_candidate_manager.get_options(); -} -// GCOVR_EXCL_STOP - void CyclesPartialTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, - PathFinderInterface& path_finder) { + RiverFlowPathFinder& path_finder) { // We'll add the calculated swaps to the path finder at the end. // THIS is the right place to do it, not the caller, because // (as far as the caller knows) it's possible that PartialTSA objects @@ -56,8 +46,7 @@ void CyclesPartialTsa::append_partial_solution( } const size_t final_swap_size = swaps.size(); TKET_ASSERT(initial_swap_size <= final_swap_size); - if (initial_swap_size == final_swap_size || - !path_finder.edge_registration_has_effect()) { + if (initial_swap_size == final_swap_size) { return; } // At least one swap was added. diff --git a/tket/src/TokenSwapping/PathFinderInterface.cpp b/tket/src/TokenSwapping/PathFinderInterface.cpp deleted file mode 100644 index d8d03169e6..0000000000 --- a/tket/src/TokenSwapping/PathFinderInterface.cpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "PathFinderInterface.hpp" - -#include "Utils/Exceptions.hpp" - -namespace tket { -namespace tsa_internal { - -PathFinderInterface::PathFinderInterface() : m_name("Empty") {} - -PathFinderInterface::~PathFinderInterface() {} - -// GCOVR_EXCL_START -const std::vector& PathFinderInterface::operator()( - size_t /*vertex1*/, size_t /*vertex2*/) { - throw NotImplemented("PathFinderInterface: get path"); -} - -const std::string& PathFinderInterface::name() const { return m_name; } - -void PathFinderInterface::reset() {} - -void PathFinderInterface::register_edge( - size_t /*vertex1*/, size_t /*vertex2*/) {} - -bool PathFinderInterface::edge_registration_has_effect() const { return false; } -// GCOVR_EXCL_STOP - -} // namespace tsa_internal -} // namespace tket diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index e80ab7516a..03d1e47368 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -149,9 +149,7 @@ RiverFlowPathFinder::RiverFlowPathFinder( DistancesInterface& distances_interface, NeighboursInterface& neighbours_interface, RNG& rng) : m_pimpl(std::make_unique( - distances_interface, neighbours_interface, rng)) { - m_name = "RiverFlow"; -} + distances_interface, neighbours_interface, rng)) {} RiverFlowPathFinder::~RiverFlowPathFinder() {} @@ -188,7 +186,5 @@ void RiverFlowPathFinder::register_edge(size_t vertex1, size_t vertex2) { ++edge_count; } -bool RiverFlowPathFinder::edge_registration_has_effect() const { return true; } - } // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp index 7113ae236b..a2053de242 100644 --- a/tket/src/TokenSwapping/TrivialTSA.cpp +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,7 +18,6 @@ #include #include "CyclicShiftCostEstimate.hpp" -#include "TokenSwapping/DebugFunctions.hpp" #include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/GeneralFunctions.hpp" #include "TokenSwapping/VertexSwapResult.hpp" @@ -158,13 +157,13 @@ void TrivialTSA::fill_disjoint_abstract_cycles( void TrivialTSA::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& /*not needed*/, - PathFinderInterface& path_finder) { + RiverFlowPathFinder& path_finder) { append_partial_solution(swaps, vertex_mapping, distances, path_finder); } void TrivialTSA::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, - DistancesInterface& distances, PathFinderInterface& path_finder) { + DistancesInterface& distances, RiverFlowPathFinder& path_finder) { if (all_tokens_home(vertex_mapping)) { return; } @@ -231,7 +230,7 @@ void TrivialTSA::copy_vertices_to_work_vector(const Endpoints& endpoints) { void TrivialTSA::append_partial_solution_with_all_cycles( SwapList& swaps, VertexMapping& vertex_mapping, - PathFinderInterface& path_finder) { + RiverFlowPathFinder& path_finder) { for (const auto& endpoints : m_cycle_endpoints) { copy_vertices_to_work_vector(endpoints); if (m_vertices_work_vector.size() < 2) { @@ -255,7 +254,7 @@ void TrivialTSA::append_partial_solution_with_all_cycles( size_t TrivialTSA::append_partial_solution_with_single_cycle( const Endpoints& endpoints, size_t start_v_index, SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, - PathFinderInterface& path_finder) { + RiverFlowPathFinder& path_finder) { copy_vertices_to_work_vector(endpoints); TKET_ASSERT(m_vertices_work_vector.size() >= 2); TKET_ASSERT(start_v_index < m_vertices_work_vector.size()); diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp index 36cb7b84fd..de4a187e15 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -89,9 +89,6 @@ class CyclesCandidateManager { unsigned min_candidate_power_percentage = 0; }; - /// Provide access to the options used, to change them if desired. - Options& get_options(); - /** The "CyclesGrowthManager" object stores the candidate cycles internally, * then we select the set of candidates to use, convert them into swaps, * and append them to the list of swaps. (All distance data has already diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp index c393270694..e0ebe7185f 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -53,20 +53,6 @@ class CyclesPartialTsa : public PartialTsaInterface { public: CyclesPartialTsa(); - /** Access the options of the inner stored object, to change behaviour. - * TODO: do many experiments, to find the best possible parameters. - * Then, set them as defaults and possibly remove this function. - * @return Options controlling behaviour of cycle growing. - */ - CyclesGrowthManager::Options& growth_options(); - - /** Access the options of the inner stored object, to change behaviour. - * TODO: do many experiments, to find the best possible parameters. - * @return Options controlling the filtering and selection of candidate - * cycles to convert to swaps. - */ - CyclesCandidateManager::Options& candidate_options(); - /** Calculate a solution to improve the current token configuarion, * add the swaps to the list, and carry out the swaps on "vertex_mapping". * We don't need a path finder because the cycles are built up one vertex @@ -87,7 +73,7 @@ class CyclesPartialTsa : public PartialTsaInterface { virtual void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, - PathFinderInterface& path_finder) override; + RiverFlowPathFinder& path_finder) override; private: /** Stores cycles, and controls the growth and discarding of cycles. diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp index 2479a1907d..bdf935a6b1 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ #include "DistancesInterface.hpp" #include "NeighboursInterface.hpp" -#include "PathFinderInterface.hpp" +#include "RiverFlowPathFinder.hpp" #include "VertexMappingFunctions.hpp" namespace tket { @@ -53,7 +53,7 @@ class PartialTsaInterface { virtual void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, - PathFinderInterface& path_finder) = 0; + RiverFlowPathFinder& path_finder) = 0; /** For debugging purposes, every TSA object has a name. * @return The name of this object (not necessarily unique). diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp deleted file mode 100644 index a6548f9fd4..0000000000 --- a/tket/src/TokenSwapping/include/TokenSwapping/PathFinderInterface.hpp +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019-2021 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#pragma once - -#include -#include - -namespace tket { -namespace tsa_internal { - -/** What is SOME shortest path between vertices? - * This might involve an arbitrary choice, - * because some paths will not be unique if the graph is not a tree. - * For algorithms, we might need to choose in a vaguely consistent way, - * and use a random number generator. - */ -class PathFinderInterface { - public: - PathFinderInterface(); - - /** By default, simply throws (not implemented). - * Returns a shortest path from v1 to v2, including v1 at the start - * and v2 at the end. This should usually return the same result for - * (v1, v2) each time it is called, but may change slightly over time. - * Although the path is stored internally, there's no guarantee - * that the reference will remain valid once another function call occurs. - * There's no guarantee that the path for (v1, v2) will be the reverse of - * the path for (v2, v1). - * Could take time O(length of path), if it is built up anew each time. - * @param vertex1 First vertex v1. - * @param vertex2 Second vertex v2. - * @return A list of vertices, starting with v1 and ending with v2, - * giving a shortest path from v1 to v2 (not unique, maybe not constant - * over time, and maybe not a valid reference after any other call). - */ - virtual const std::vector& operator()(size_t vertex1, size_t vertex2); - - virtual ~PathFinderInterface(); - - /** Some path finders use randomness; if so, override this to reset - * the source of randomness to some default seed - * to ensure reproducibility. By default, does nothing. - */ - virtual void reset(); - - /** If some other algorithm has made use of an edge v1-v2, - * without going through this path finder object, - * call this function to inform this object. - * (E.g., some classes remember which previous operator() calls were made, - * and use them to decide future paths when there is a nonunique choice). - * By default, does nothing. - * @param vertex1 First vertex v1. - * @param vertex2 Second vertex v2. - */ - virtual void register_edge(size_t vertex1, size_t vertex2); - - /** For convenience, if "register_edge" does nothing, return false so that the - * caller knows and doesn't waste time repeatedly calling "register_edge". - * @return True if the function "register_edge" has been overridden to do - * something, false if the function does nothing - */ - virtual bool edge_registration_has_effect() const; - - /** For debugging purposes, every object has a name. - * @return The name of the object. - */ - const std::string& name() const; - - protected: - std::string m_name; -}; - -} // namespace tsa_internal -} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp index 23546388df..daaab1609e 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,24 +20,29 @@ #include "DistancesInterface.hpp" #include "NeighboursInterface.hpp" -#include "PathFinderInterface.hpp" #include "RNG.hpp" namespace tket { namespace tsa_internal { -/** Think of flowing water: if it has already flowed through, it creates - * channels along which it is more likely to flow next time. - * We do a similar idea: the PURPOSE is to try to make paths overlap; - * if we move tokens along paths with many edges in common, it is more likely - * that some basic swap optimisation will reduce the number of swaps. - * (Disjoint swaps are the worst kind to optimise, of course; - * no reduction is possible). +/** Given two vertices in a graph, find a shortest path between them; + * of course paths might not be unique. + * The aim is to make paths overlap; + * if we move tokens along paths with many edges in common, it is more likely + * that some basic swap optimisation will reduce the number of swaps. + * (Disjoint swaps are the worst kind to optimise, of course; + * no reduction is possible). * - * This is supposed to be reasonably fast. Repeated calls to operator()(v1,v2) - * are likely to return the same path, but may change slightly over time. + * We think of flowing water: if water has already flowed through, + * it creates channels along which it is more likely to flow next time. + * We do a similar thing: by remembering which edges have already been used, + * whenever we have a choice of edge to continue a path, choose one which + * has already been used frequently. + * + * Repeated calls to operator()(v1,v2) + * are likely to return the same path, but may change slightly over time. */ -class RiverFlowPathFinder : public PathFinderInterface { +class RiverFlowPathFinder { public: /** All the objects should remain valid throughout * the lifetime of this object. @@ -61,36 +66,31 @@ class RiverFlowPathFinder : public PathFinderInterface { * partially finished problems, even though the end-to-end problem * is the same). */ - virtual void reset() override; + void reset(); - /** Get the path from v1 to v2. As always, may change over time; + /** Get the path from v1 to v2. May change over time, and * path(v1, v2) is NOT necessarily the reverse of path(v2, v1). * @param vertex1 First vertex v1. * @param vertex2 Second vertex v2. * @return A list of vertices, starting with v1 and ending with v2, * giving a shortest path from v1 to v2. */ - virtual const std::vector& operator()( - size_t vertex1, size_t vertex2) override; + const std::vector& operator()(size_t vertex1, size_t vertex2); - virtual ~RiverFlowPathFinder(); + ~RiverFlowPathFinder(); - /** We really do want to know which edges have been used in the solution so - * far, that's the whole point of this class. + /** Whenever an edge is used, i.e. we swap tokens along it, tell this + * object; the proper functioning of this class depends on + * knowing which edges have been used in the solution so far. * @param vertex1 First vertex v1 of an edge v1-v2 that was used in the * solution. * @param vertex2 Second vertex v2 of the edge. */ - virtual void register_edge(size_t vertex1, size_t vertex2) override; - - /** Returns true for this object, since we definitely do want to remember - * previous edges. - * @return True, always, for this class. - */ - virtual bool edge_registration_has_effect() const override; + void register_edge(size_t vertex1, size_t vertex2); private: struct Impl; + /** Pimpl idiom. 
*/ std::unique_ptr m_pimpl; }; diff --git a/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp b/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp index 7260e5c9f0..bfa1f50b00 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -71,7 +71,7 @@ class TrivialTSA : public PartialTsaInterface { virtual void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& /*not_needed*/, - PathFinderInterface& path_finder) override; + RiverFlowPathFinder& path_finder) override; /** The same as the standard append_partial_solution interface, * but without needing to pass in a NeighboursInterface. @@ -83,7 +83,7 @@ class TrivialTSA : public PartialTsaInterface { */ void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, - DistancesInterface& distances, PathFinderInterface& path_finder); + DistancesInterface& distances, RiverFlowPathFinder& path_finder); private: // NOTE: the reason this is all a bit more complicated (and so, the word @@ -181,7 +181,7 @@ class TrivialTSA : public PartialTsaInterface { */ void append_partial_solution_with_all_cycles( SwapList& swaps, VertexMapping& vertex_mapping, - PathFinderInterface& path_finder); + RiverFlowPathFinder& path_finder); /** Perform the single abstract cycle, but breaking off as soon as * the overall total home distance (L) decreases. @@ -214,7 +214,7 @@ class TrivialTSA : public PartialTsaInterface { // L (the sum of the distances to home) must decrease // by at least this amount, to break off early. 
SwapList& swaps, VertexMapping& vertex_mapping, - DistancesInterface& distances, PathFinderInterface& path_finder); + DistancesInterface& distances, RiverFlowPathFinder& path_finder); }; } // namespace tsa_internal From 3366e223a61ba12328b03a68dcf5187d86faef7f Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 16:39:24 +0000 Subject: [PATCH 071/146] rename HybridTsa00 -> HybridTsa; move files out of include directory --- .../CyclicShiftCostEstimate.hpp | 2 +- .../{HybridTsa00.cpp => HybridTsa.cpp} | 17 +++++------------ .../{HybridTsa00.hpp => HybridTsa.hpp} | 19 ++++--------------- 3 files changed, 10 insertions(+), 28 deletions(-) rename tket/src/TokenSwapping/{include/TokenSwapping => }/CyclicShiftCostEstimate.hpp (98%) rename tket/src/TokenSwapping/{HybridTsa00.cpp => HybridTsa.cpp} (79%) rename tket/src/TokenSwapping/include/TokenSwapping/{HybridTsa00.hpp => HybridTsa.hpp} (74%) diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp similarity index 98% rename from tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp rename to tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp index 647839e9ac..fa3111afd5 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/CyclicShiftCostEstimate.hpp +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tket/src/TokenSwapping/HybridTsa00.cpp b/tket/src/TokenSwapping/HybridTsa.cpp similarity index 79% rename from tket/src/TokenSwapping/HybridTsa00.cpp rename to tket/src/TokenSwapping/HybridTsa.cpp index 9f5df1fd19..b11b4b173e 100644 --- a/tket/src/TokenSwapping/HybridTsa00.cpp +++ b/tket/src/TokenSwapping/HybridTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "HybridTsa00.hpp" +#include "HybridTsa.hpp" #include "TokenSwapping/DistanceFunctions.hpp" #include "Utils/Assert.hpp" @@ -22,23 +22,16 @@ using std::vector; namespace tket { namespace tsa_internal { -HybridTsa00::HybridTsa00() { +HybridTsa::HybridTsa() { m_name = "HybridTSA_00"; m_trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); } -// GCOVR_EXCL_START -CyclesPartialTsa& HybridTsa00::get_cycles_tsa_for_testing() { - return m_cycles_tsa; -} - -TrivialTSA& HybridTsa00::get_trivial_tsa_for_testing() { return m_trivial_tsa; } -// GCOVR_EXCL_STOP -void HybridTsa00::append_partial_solution( +void HybridTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, - PathFinderInterface& path_finder) { + RiverFlowPathFinder& path_finder) { const auto initial_L = get_total_home_distances(vertex_mapping, distances); for (size_t counter = initial_L + 1; counter > 0; --counter) { const auto swaps_before = swaps.size(); diff --git a/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp b/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa.hpp similarity index 74% rename from tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp rename to tket/src/TokenSwapping/include/TokenSwapping/HybridTsa.hpp index 3c5d86d9b4..f64ecd1335 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa00.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ namespace tsa_internal { /** A full end-to-end TSA, combining the partial cycles TSA * (hopefully good) with the full "trivial" TSA (not so good). */ -class HybridTsa00 : public PartialTsaInterface { +class HybridTsa : public PartialTsaInterface { public: - HybridTsa00(); + HybridTsa(); /** For the current token configuration, calculate a sequence of swaps * to move all tokens home, and append them to the given list. @@ -41,18 +41,7 @@ class HybridTsa00 : public PartialTsaInterface { virtual void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, - PathFinderInterface& path_finder) override; - - /** Only for experiments; will be removed again - * once the best parameter combinations are found! - * @return A reference to the internal TSA object, to change parameters. - */ - CyclesPartialTsa& get_cycles_tsa_for_testing(); - - /** Temporary; only for experiments! - * @return A reference to the internal TSA object, to change parameters. 
- */ - TrivialTSA& get_trivial_tsa_for_testing(); + RiverFlowPathFinder& path_finder) override; private: CyclesPartialTsa m_cycles_tsa; From b67528cfec0e357b42251959f9926777150ec742 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 16:45:28 +0000 Subject: [PATCH 072/146] move DebugFunctions into tests --- tket/src/TokenSwapping/CMakeLists.txt | 4 +--- .../TestUtils}/DebugFunctions.cpp | 14 +++++++++-- .../TestUtils}/DebugFunctions.hpp | 23 +++++++++++++++++-- tket/tests/tkettestsfiles.cmake | 1 + 4 files changed, 35 insertions(+), 7 deletions(-) rename tket/{src/TokenSwapping/TSAUtils => tests/TokenSwapping/TestUtils}/DebugFunctions.cpp (72%) rename tket/{src/TokenSwapping/include/TokenSwapping => tests/TokenSwapping/TestUtils}/DebugFunctions.hpp (52%) diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt index 3d135284f2..14e74dacac 100644 --- a/tket/src/TokenSwapping/CMakeLists.txt +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -28,18 +28,16 @@ add_library(tket-${COMP} DistancesFromArchitecture.cpp DistancesInterface.cpp DynamicTokenTracker.cpp - HybridTsa00.cpp + HybridTsa.cpp main_entry_functions.cpp NeighboursFromArchitecture.cpp NeighboursInterface.cpp PartialTsaInterface.cpp - PathFinderInterface.cpp RiverFlowPathFinder.cpp RNG.cpp SwapListOptimiser.cpp TrivialTSA.cpp VectorListHybridSkeleton.cpp - TSAUtils/DebugFunctions.cpp TSAUtils/DistanceFunctions.cpp TSAUtils/GeneralFunctions.cpp TSAUtils/SwapFunctions.cpp diff --git a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp similarity index 72% rename from tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp rename to tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp index ce4943e493..70070c8889 100644 --- a/tket/src/TokenSwapping/TSAUtils/DebugFunctions.cpp +++ b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "TokenSwapping/DebugFunctions.hpp" +#include "DebugFunctions.hpp" #include @@ -40,5 +40,15 @@ std::string str(const std::vector& swaps) { return ss.str(); } +size_t get_swaps_lower_bound( + const VertexMapping& vertex_mapping, + DistancesInterface& distances_calculator) { + // Each swap decreases the sum by at most 2 (and more likely 1 in many cases, + // if the mapping is sparse), so we need >= sum/2. But it's an integer of + // course. 
+ return (get_total_home_distances(vertex_mapping, distances_calculator) + 1) / + 2; +} + } // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.hpp similarity index 52% rename from tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp rename to tket/tests/TokenSwapping/TestUtils/DebugFunctions.hpp index a2fa9f1625..3d4dd0f9e8 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DebugFunctions.hpp +++ b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,8 @@ #include -#include "VertexMappingFunctions.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { @@ -42,5 +43,23 @@ std::string str(const SwapList& swaps); */ std::string str(const std::vector& swaps); +/** A simple theoretical lower bound on the number of swaps necessary + * to achieve a given vertex mapping. (Of course it is not always possible + * to achieve this bound. But the algorithm in the 2016 paper + * "Approximation and Hardness of Token Swapping", for example, guarantees + * to find a solution within a factor of 4, or a factor of 2 for trees, + * in the case where every vertex has a token). + * TODO: What happens if some vertices are empty? Not considered in the 2016 + * paper! Need to think about it. This is still a lower bound, but how close? + * @param vertex_mapping current source->target mapping. + * @param distances An object to calculate distances between vertices. + * @return A number S such that every possible solution has >= S swaps. + * However, note that the true minimum value might be larger, but finding + * the value seems about as hard as finding an actual solution, and thus + * is possibly exponentially hard (seems to be unknown, even for trees). 
+ */ +size_t get_swaps_lower_bound( + const VertexMapping& vertex_mapping, DistancesInterface& distances); + } // namespace tsa_internal } // namespace tket diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 4fcdd0481e..d5ed8fcc20 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -51,6 +51,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/BestTsaTester.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DebugFunctions.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DecodedProblemData.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/FullTsaTesting.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp From db33a8628bfa7a003b4a39cbcc523128fbee67f6 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 16:47:13 +0000 Subject: [PATCH 073/146] move get_swaps_lower_bound out of tket into tests --- .../TSAUtils/DistanceFunctions.cpp | 12 +---------- .../TokenSwapping/DistanceFunctions.hpp | 20 +------------------ .../TestUtils/FullTsaTesting.cpp | 4 ++-- 3 files changed, 4 insertions(+), 32 deletions(-) diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp index 7463e35b09..cc16d974cd 100644 --- a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -50,15 +50,5 @@ int get_swap_decrease( get_move_decrease(vertex_mapping, v2, v1, distances); } -size_t get_swaps_lower_bound( - const VertexMapping& vertex_mapping, - DistancesInterface& distances_calculator) { - // Each swap decreases the sum by at most 2 (and more likely 1 in many cases, - // if the mapping is sparse), so we need >= sum/2. But it's an integer of - // course. - return (get_total_home_distances(vertex_mapping, distances_calculator) + 1) / - 2; -} - } // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp index 43612bfeb5..ae72a71a0a 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -66,23 +66,5 @@ int get_swap_decrease( const VertexMapping& vertex_mapping, size_t v1, size_t v2, DistancesInterface& distances); -/** A simple theoretical lower bound on the number of swaps necessary - * to achieve a given vertex mapping. (Of course it is not always possible - * to achieve this bound. But the algorithm in the 2016 paper - * "Approximation and Hardness of Token Swapping", for example, guarantees - * to find a solution within a factor of 4, or a factor of 2 for trees, - * in the case where every vertex has a token). - * TODO: What happens if some vertices are empty? Not considered in the 2016 - * paper! 
Need to think about it. This is still a lower bound, but how close? - * @param vertex_mapping current source->target mapping. - * @param distances An object to calculate distances between vertices. - * @return A number S such that every possible solution has >= S swaps. - * However, note that the true minimum value might be larger, but finding - * the value seems about as hard as finding an actual solution, and thus - * is possibly exponentially hard (seems to be unknown, even for trees). - */ -size_t get_swaps_lower_bound( - const VertexMapping& vertex_mapping, DistancesInterface& distances); - } // namespace tsa_internal } // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp index 7659964336..44f6b56c40 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,8 +16,8 @@ #include +#include "DebugFunctions.hpp" #include "TokenSwapping/ArchitectureMapping.hpp" -#include "TokenSwapping/DebugFunctions.hpp" #include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" From 3e518df1c7b32c2d1c5d08e8be87a81de534f191 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 16:52:07 +0000 Subject: [PATCH 074/146] simple typos; unused code; extra comments, asserts --- .../TableLookup/SwapConversion.cpp | 3 ++- .../TableLookup/SwapListTableOptimiser.cpp | 5 ++-- .../include/TokenSwapping/BestFullTsa.hpp | 16 ++++------- .../TokenSwapping/FilteredSwapSequences.hpp | 27 +++++++++++++++++-- .../TokenSwapping/SwapSequenceTable.hpp | 21 +++++++++------ tket/src/Utils/include/Utils/Assert.hpp | 21 ++++++++------- .../TableLookup/test_ExactMappingLookup.cpp | 4 +-- .../TestUtils/PartialTsaTesting.cpp | 9 +++---- .../TestUtils/PartialTsaTesting.hpp | 4 +-- tket/tests/TokenSwapping/test_FullTsa.cpp | 6 ++--- .../TokenSwapping/test_SwapListOptimiser.cpp | 4 +-- .../TokenSwapping/test_VariousPartialTsa.cpp | 4 +-- 12 files changed, 74 insertions(+), 50 deletions(-) diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp index 0c8d8ad3ad..4fe2124468 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -38,6 +38,7 @@ static const vector& get_swaps_global() { } const Swap& SwapConversion::get_swap_from_hash(SwapHash x) { + TKET_ASSERT(x >= 1 && x <= 15); return get_swaps_global().at(x - 1); } diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp index 6478942473..a88a4a7e22 100644 --- a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ #include #include -#include "TokenSwapping/DebugFunctions.hpp" #include "Utils/Assert.hpp" namespace tket { @@ -74,7 +73,7 @@ static bool erase_empty_swaps_interval( } } // Should never get here! - TKET_ASSERT(!"erase_empty_swaps_interval falied to terminate"); + TKET_ASSERT(!"erase_empty_swaps_interval failed to terminate"); return false; } diff --git a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp index f476dbc0e8..ee531499ff 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ #pragma once #include "ArchitectureMapping.hpp" -#include "HybridTsa00.hpp" +#include "HybridTsa.hpp" #include "RNG.hpp" #include "SwapListOptimiser.hpp" #include "TokenSwapping/SwapListTableOptimiser.hpp" @@ -49,10 +49,10 @@ class BestFullTsa : public PartialTsaInterface { virtual void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, - PathFinderInterface& path_finder) override; + RiverFlowPathFinder& path_finder) override; /** Wrapper around the main append_partial_solution function, but constructing - * and using the best known PathFinderInterface object. The DistancesInterface + * and using the best known RiverFlowPathFinder object. The DistancesInterface * and NeighboursInterface objects will automatically be constructed. * @param swaps The list of swaps to append to. * @param vertex_mapping The current desired mapping. Will be updated with @@ -64,14 +64,8 @@ class BestFullTsa : public PartialTsaInterface { SwapList& swaps, VertexMapping& vertex_mapping, const ArchitectureMapping& arch_mapping); - /** For experiments, provide access to the internal stored TSA object. This - * function may be deleted later! - * @return Reference to the internal stored TSA object. 
- */ - // HybridTsa00& get_hybrid_tsa_for_testing(); - private: - HybridTsa00 m_hybrid_tsa; + HybridTsa m_hybrid_tsa; SwapListOptimiser m_swap_list_optimiser; SwapListTableOptimiser m_table_optimiser; RNG m_rng; diff --git a/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp b/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp index 72cea46c39..7a4ee5bccf 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,6 +24,29 @@ namespace tsa_internal { /** Takes a raw list of integers, where each integer represents a swap sequence * on the vertices {0,1,2,...,5} giving the same vertex permutation. + * + * NOTE: the magic number 5 (or 6) arises because we originally constructed + * the table by exhaustively constructing swap sequences on graphs with up to + * 6 vertices, up to a certain length. [Results were also merged together, + * e.g. the cycle C_6, or with a few extra edges added, can be searched + * in reasonable time to a longer length than K_6]. + * This was chosen because the complete graph K_6 has 15 edges, + * so conveniently each edge (or swap) can be represented by a number 1-15, + * and thus by a single hexadecimal digit. + * Thus, 4 bits are needed for each swap, so a 64-bit integer can represent + * swap sequences of length <= 16 (with 0 denoting the end of sequence). + * [Although, the table currently has entries only of length <= 12]. + * [Actually, it is not hard to prove - by considering "token tracking" - + * that optimal swap sequences on <= N vertices have + * length <= N(N-1)/2, the same as the number of edges of K_N. Thus length + * <= 15 already suffices to represent all possible optimal sequences + * on <= 6 vertices]. + * If we used 5 bits, we'd be able to represent sequences of length <= 12 + * (because 5*12 = 60 < 64) on graphs with <= 8 vertices (since + * 8*7/2 = 28 < 31). + * If we expand the table in future, we will probably design a whole new + * format, so we don't attempt to make it more generic at this stage. + * * Given such data, FilteredSwapSequences knows how to index and store it * somehow (exactly how is an implementation detail - it can be thought of * as a "database of swap sequences"), @@ -47,7 +70,7 @@ class FilteredSwapSequences { struct SingleSequenceData { /** The edges (i.e., swaps) actually used (or 0 if none are used). [This * could be computed from swaps_code but there is no need to recompute each - * time. */ + * time]. */ SwapConversion::EdgesBitset edges_bitset; /** An integer encoding a sequence of swaps. 0 means no swaps. */ diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp index 6ff7237f9a..3c9939c771 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -32,21 +32,26 @@ namespace tsa_internal { * with <= 6 vertices. Due to time/space limitations some non-complete graphs * were searched as well as complete graphs K4, K5, K6. * - * Of course, ideally we'd search K6 up to depth 16, but searching up to depth 9 + * Note that, by token tracking, any swap sequence of n vertices of length + * > n(n-1)/2 can be reduced in length, so in fact any optimal swap sequence + * on n vertices has length <= n(n-1)/2, the number of edges in + * the complete graph K(n). + * + * Of course, ideally we'd search K6 up to depth 15, but searching up to depth 9 * already consumed ~30 mins of CPU time and most of the memory capacity of an * ordinary laptop. More efficient exhaustive search algorithms with clever * pruning might cut it down a bit, but (since each added depth increases the * difficulty roughly by a factor of 14) it would require significant - * computational effort to reach even depth 12 for K6, and depth 16 probably - * requires a supercomputer, or a very large distributed computation). + * computational effort to reach even depth 12 for K6, and depth 15 probably + * requires a supercomputer, or a very large distributed computation, + * or significantly more intelligent methods). * * The table size is far smaller than the precomputation needed to create it. * The creation considered millions of sequences, but the table has only a few * thousand entries. * - * The table currently contains all swap sequences of length: - * <= 12 on 4 vertices (K4, depth 12); - * <= 10 on 5 vertices (K5, depth 10); + * The table currently contains ALL optimal swap sequences on <= 5 vertices, + * and also all swap sequences of length: * <= 9 on 6 vertices (K6, depth 9); * <= 12 on cycles with <= 6 vertices (C5, C6); * <= 12 on a few other special graphs with 6 vertices. @@ -81,7 +86,7 @@ namespace tsa_internal { * and the third has the form [ab cb ab] == [ca].) It seems like we'd need a * scheme involving integer hashing of graphs, with few isomorphic collisions, * but such algoritms need to be pretty simple and fast or they're not worth - * doing except for much larger table sizes). + * doing except for much larger table sizes. */ struct SwapSequenceTable { /** The integer type used to encode a swap sequence on vertices {0,1,2,3,4,5}. diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index 019740f8ec..66a465abb7 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -33,19 +33,22 @@ * to a stringstream) will NOT begin if `condition` is true, * so there is no performance penalty. * - * Note: the intention was that the code would be ignored by test code - * coverage, even if multiline. However that didn't work, so we may - * just manually surround the worst multiline offenders until we come up - * with a better solution. - * * This also checks if evaluating `condition` itself throws an exception. * - * Note: we already tried having another version which threw exceptions - * instead of aborting. However, it led to problems: + * Notes: (1) single line code is ignored by test code coverage. + * However, multiline code is not. Currently we just manually surround the + * worst multiline offenders with start/stop tags. + * (2) We tried putting the start/stop tags within the macro, to make test + * coverage ignore the code; unfortunately this did NOT work. + * We suspect that it's because comments are stripped from the macro before + * test coverage sees the code, but we don't know. 
See TKET-1856. + * (3) It is known that exceptions cause problems by generating numerous + * extra branches: * https://stackoverflow.com/questions/42003783/ * lcov-gcov-branch-coverage-with-c-producing-branches-all-over-the-place?rq=1 - * Thus, if you want to throw an exception rather than aborting, - * there are additional problems which need to be overcome somehow. + * However, although removing exceptions (or "hiding" them, by tricks) + * did cut down the number of extra branches, it did not remove them + * completely, so exceptions are not the sole cause of branching problems. */ #define TKET_ASSERT_WITH_MESSAGE(condition, msg) \ do { \ diff --git a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp index 9fbebf93cc..047c2804e8 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ #include #include -#include "TokenSwapping/DebugFunctions.hpp" +#include "../TestUtils/DebugFunctions.hpp" #include "TokenSwapping/ExactMappingLookup.hpp" #include "TokenSwapping/GeneralFunctions.hpp" #include "TokenSwapping/VertexMappingFunctions.hpp" diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp index 722a6825a1..524de4484b 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -71,7 +71,7 @@ static void check_progress( static std::string run_tests( const std::vector& problems, DistancesInterface& distances, - NeighboursInterface& neighbours, PathFinderInterface& path_finder, + NeighboursInterface& neighbours, RiverFlowPathFinder& path_finder, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, TokenOption token_option) { REQUIRE(!problems.empty()); @@ -114,15 +114,14 @@ static std::string run_tests( default: break; } - ss << " PF=" << path_finder.name() << "\n" - << statistics.str(problems.size()) << "]"; + ss << " PF=RiverFlow\n" << statistics.str(problems.size()) << "]"; return ss.str(); } std::string run_tests( const ArchitectureMapping& arch_mapping, const std::vector& problems, - PathFinderInterface& path_finder, PartialTsaInterface& partial_tsa, + RiverFlowPathFinder& path_finder, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, TokenOption token_option) { DistancesFromArchitecture distances(arch_mapping); NeighboursFromArchitecture neighbours(arch_mapping); diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index fe4c3b9857..1d08b223c2 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -32,7 +32,7 @@ enum class TokenOption { std::string run_tests( const ArchitectureMapping& arch_mapping, const std::vector& problems, - PathFinderInterface& path_finder, PartialTsaInterface& partial_tsa, + RiverFlowPathFinder& path_finder, PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, TokenOption token_option = TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP); diff --git a/tket/tests/TokenSwapping/test_FullTsa.cpp b/tket/tests/TokenSwapping/test_FullTsa.cpp index b9aae17bcb..369a31f948 100644 --- a/tket/tests/TokenSwapping/test_FullTsa.cpp +++ b/tket/tests/TokenSwapping/test_FullTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ #include "TestUtils/ArchitectureEdgesReimplementation.hpp" #include "TestUtils/FullTsaTesting.hpp" #include "TestUtils/ProblemGeneration.hpp" -#include "TokenSwapping/HybridTsa00.hpp" +#include "TokenSwapping/HybridTsa.hpp" using std::vector; @@ -29,7 +29,7 @@ namespace { struct FullTester { FullTsaTesting results; FullTsaTesting trivial_results; - HybridTsa00 full_tsa; + HybridTsa full_tsa; TrivialTSA trivial_tsa; RNG rng; ProblemGenerator00 generator; diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp index 85242d0f36..2727651427 100644 --- a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,7 +17,7 @@ #include #include -#include "TokenSwapping/DebugFunctions.hpp" +#include "TestUtils/DebugFunctions.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp index aba973ec54..80fcc185e0 100644 --- a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -1,4 +1,4 @@ -// Copyright 2019-2021 Cambridge Quantum Computing +// Copyright 2019-2022 Cambridge Quantum Computing // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #include "TestUtils/PartialTsaTesting.hpp" #include "TestUtils/ProblemGeneration.hpp" #include "TokenSwapping/CyclesPartialTsa.hpp" -#include "TokenSwapping/DebugFunctions.hpp" +#include "TestUtils/DebugFunctions.hpp" #include "TokenSwapping/RNG.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/TrivialTSA.hpp" From 8719249e5edcdc5f14ccc2294aaac13cea29fbc3 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 20:05:45 +0000 Subject: [PATCH 075/146] rename main_entry_functions -> SwapsFromQubitMapping; remove unused function --- tket/src/Mapping/MappingManager.cpp | 2 +- tket/src/TokenSwapping/README.txt | 128 +++++++++++++++ .../TokenSwapping/SwapsFromQubitMapping.cpp | 69 +++++++++ .../TokenSwapping/SwapsFromQubitMapping.hpp | 39 +++++ .../TokenSwapping/main_entry_functions.hpp | 64 -------- .../TokenSwapping/main_entry_functions.cpp | 146 ------------------ ...ons.cpp => test_SwapsFromQubitMapping.cpp} | 9 +- 7 files changed, 241 insertions(+), 216 deletions(-) create mode 100644 tket/src/TokenSwapping/README.txt create mode 100644 tket/src/TokenSwapping/SwapsFromQubitMapping.cpp create mode 100644 tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp delete mode 100644 tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp delete mode 100644 tket/src/TokenSwapping/main_entry_functions.cpp rename tket/tests/TokenSwapping/{test_main_entry_functions.cpp => test_SwapsFromQubitMapping.cpp} (96%) diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 3df1a52f17..ec4b5f293f 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -15,7 +15,7 @@ #include "Mapping/MappingManager.hpp" #include "OpType/OpTypeFunctions.hpp" -#include "TokenSwapping/main_entry_functions.hpp" +#include "TokenSwapping/SwapsFromQubitMapping.hpp" namespace tket { diff --git a/tket/src/TokenSwapping/README.txt b/tket/src/TokenSwapping/README.txt new file mode 100644 index 0000000000..0a7d21d8f1 --- /dev/null +++ b/tket/src/TokenSwapping/README.txt @@ -0,0 +1,128 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+Some brief explanation of the Token Swapping algorithms here may be helpful.
+
+PROBLEM: let G be a graph (undirected, no loops or multiple edges), with labelled distinct tokens (counters) on some or all of the vertices.
+
+- An allowed move is to choose two adjacent vertices v1, v2 and swap whatever tokens T1, T2 are currently on those vertices. (Or, if one of the vertices, say v2, does not have a token, simply move the token T1 from v1 to v2).
+
+- We are given a desired final rearrangement of the tokens.
+
+- The problem is to compute a swap sequence, of shortest length, which will transform the initial token configuration into the desired final configuration.
+
+- Thus, if every vertex contains a token, we are trying to perform a given permutation on the vertices of the graph.
+
+- It is not hard to show: that if the graph is connected, then EVERY rearrangement is possible.
+
+- More generally, a solution exists if and only if: for every token, the initial and final vertex are in the same connected component.
+
+The 2016 paper "Approximation and Hardness of Token Swapping" by Tillmann Miltzow, Lothar Narins, Yoshio Okamoto, Gunter Rote, Antonis Thomas, Takeaki Uno is very useful.
+
+One of the main results is an algorithm, based upon finding cycles in the graph, which (in the full case, where every vertex has a token) is guaranteed to use no more than 4x the optimal number of swaps, or 2x for trees.
+
+The 2019 paper "Circuit Transformations for Quantum Architectures" by Andrew M. Childs, Eddie Schoute and Cem M. Unsal generalises to the partial case (where some vertices might not contain a token).
+
+
+KNOWN RESTRICTIONS:
+
+If the graph G is not connected, our routines may fail.
+
+- We plan to fix this; until then, a workaround is to split the problem into connected components.
+
+- Of course, in real problems, architectures are connected so this doesn't arise.
+
+
+OUR ALGORITHMS:
+
+Let L be the sum of distances of each token T from its final destination vertex.
+
+Thus L >= 0 always, and the problem is finished if and only if L=0.
+
+Thus, the goal of a token swapping algorithm (TSA) is to reduce L to zero, using as few swaps as possible.
+
+(1) Cycle finding
+
+We make some observations:
+
+- The algorithms in the 2016 and 2019 papers try to find a cycle [v(0), v(1), ..., v(n), v(0)] in G. Initially, let vertex v(j) have token T(j). The swaps [v(0), v(1)], [v(1), v(2)], ..., [v(n-1), v(n)] then have the effect of performing a cyclic shift: T(0) -> v(n), T(1) -> v(0), T(2) -> v(1), ..., T(n) -> v(n-1).
+
+- However, we do not need the swap [v(n), v(0)]. Thus, we can perform cyclic shifts along paths [v(0), v(1), ..., v(n)] instead of cycles.
+
+- The paper algorithms search for cycles where EVERY token move T->v is beneficial, i.e. moves the token T one step closer to its final destination. Thus, the cycle on n vertices reduces L by n, at the cost of n-1 swaps.
+
+- Cyclic shifts on n vertices may exist which contain swaps which don't decrease L (they might even increase L). However, as long as the overall effect is a decrease in L, the cyclic shift is not bad.
+
+Thus, our algorithm works by searching for good PATHS instead of cycles; it allows some individual swaps to be bad, as long as the overall effect is still good.
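For illustration only, here is a minimal standalone sketch (plain standard C++, not part of this patch or of the tket code; the 4-vertex path graph and the token/target arrays are invented for the example) of "swapping along a path to perform a cyclic shift", together with its effect on L:

// Sketch: perform a cyclic shift of tokens along the path 0-1-2-3 using
// adjacent swaps only, and watch the total home distance L drop to zero.
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // token[v] = the token currently sitting on vertex v.
  // target[t] = the vertex where token t wants to end up (here token t -> vertex t).
  std::vector<std::size_t> token{1, 2, 3, 0};
  const std::vector<std::size_t> target{0, 1, 2, 3};

  const auto total_home_distance = [&token, &target] {
    std::size_t L = 0;
    for (std::size_t v = 0; v < token.size(); ++v) {
      const std::size_t home = target[token[v]];
      L += (home > v) ? home - v : v - home;  // graph distance along the path
    }
    return L;
  };

  std::cout << "L before: " << total_home_distance() << '\n';  // prints 6

  // Swapping along the path [0,1,2,3] with the 3 swaps (2,3), (1,2), (0,1)
  // performs the cyclic shift; the edge 3-0 is never needed.
  for (std::size_t v = token.size() - 1; v > 0; --v) {
    std::swap(token[v - 1], token[v]);
  }
  std::cout << "L after: " << total_home_distance() << '\n';  // prints 0
}

In this toy example each of the three swaps moves both of its tokens one step closer to home, so L falls by 2 per swap: a "good" cyclic shift in the sense above.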
+
+(2) Alternative moves when cycles fail
+
+When no good cycles exist, a different move must be performed. The performed move might not decrease L (although L never increases, it may leave L unchanged), and so we must worry whether the algorithm will ever terminate.
+
+- In the papers, the performed moves are carefully chosen so that theorems guarantee that the algorithm terminates in a reasonable number of moves.
+
+- We have no such theoretical guarantee. Instead, we perform a "trivial TSA", i.e., a sequence of swaps guaranteed to reduce L to zero eventually, even if the number of swaps is not so good.
+
+However, notice that we need not perform the "trivial TSA" swaps until the end; we can break off as soon as L decreases, and switch back to the normal cyclic-shift mode, which is expected to reduce L more quickly.
+
+(3) Additional reductions
+
+We perform two more reductions, which are new:
+
+- General swap list optimisation: given a sequence S of swaps, various simple substitutions may reduce the length of S, whilst keeping the overall effect of S unchanged; provided that we only use the same swaps that were present in S, the new sequence is still valid (does not use any further edges of the graph).
+
+- General table lookup reduction: we have a large precomputed table which contains optimal swap sequences on graphs with <= 6 vertices. Thus, given our computed swap sequence S, we find the vertex mapping between two times, look up an optimal swap sequence for the mapping in the table (using only edges in our given graph, i.e. valid swaps), and replace the swap segment if the new sequence is shorter.
+
+
+
+THE MAIN ALGORITHMIC CLASSES:
+
+
+BestFullTsa:
+
+The main end-to-end mapping. Uses HybridTsa to compute a solution; then general reduction rules (SwapListOptimiser) and table lookups (SwapListSegmentOptimiser, SwapListTableOptimiser) to reduce the swap sequence length, whilst preserving the mapping.
+
+
+HybridTsa:
+
+Combines CyclesPartialTsa, TrivialTSA to get an overall full TSA (token swapping algorithm). This works by running CyclesPartialTsa whenever possible; switching to TrivialTSA when it gets stuck; and breaking off TrivialTSA and switching back to CyclesPartialTsa when progress is made (i.e., L is decreased).
+
+Note that HybridTsa also uses simple heuristics about which abstract cycles to perform first, and which abstract swap to omit.
+
+(I.e., to perform the abstract cyclic shift v0->v1->v2->...->v(n)->v0, we often write [v(n), v(n-1)], [v(n-1), v(n-2)], ..., [v2, v1], [v1, v0]. Thus we have OMITTED [v0, v(n)] from the set of swaps [v(i), v(i+1)] for i=0,1,2,...,n, where v(n+1) = v(0) by definition. However, we could have omitted any of the n+1 swaps).
+
+HybridTsa tries to estimate which ordering is likely to reduce L the quickest; although TrivialTSA works, it is expected to be worse than CyclesPartialTsa when that class works, so we want to break off as soon as possible.
+
+
+CyclesPartialTsa:
+
+The main "cycle finding" class, corresponding to the cycles in the papers. "Partial" because it is not guaranteed to find a swap sequence. Constructs "concrete" paths, i.e. actual paths in the graph, which give rise to "abstract cycles" (i.e. a cyclic shift, performed by swapping along the path).
+
+
+TrivialTSA:
+
+Performs any desired vertex permutation, as follows:
+
+(i) split the permutation into disjoint cycles. (Called "abstract" cycles, because they are unrelated to the cycles in the actual graph. They are TOTALLY UNRELATED to the cycles of "CyclesPartialTsa"! In fact they know nothing of the underlying graph).
+
+(ii) decompose the abstract cycles into "abstract swaps", i.e. without knowing the edges of the graph, the cyclic shift v0->v1->v2->...->vn->v0 can be rewritten as the abstract swaps [vn, v(n-1)], [v(n-1), v(n-2)], ..., [v2, v1], [v1, v0], which might not be possible in the graph.
+
+(iii) decompose the abstract swaps into concrete swaps. I.e., choose a path [u, v0, v1, ..., v(n), v] between given (u,v), so that the abstract swap(u,v) can be performed by swapping along the path.
+
+
+RiverFlowPathFinder:
+
+Actually computes the path required by TrivialTSA, for part (iii). We don't just choose a path at random; we deliberately make the paths overlap as much as possible, for better optimisation later (and tests showed that this really is significant).
+
+
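To make steps (i) and (ii) of TrivialTSA above concrete, here is a small standalone sketch (an illustration in plain C++, not the tket implementation; the example permutation is invented). It splits a desired permutation into disjoint cycles and rewrites each cycle as abstract swaps; step (iii), realising each abstract swap by swapping along a real path, is what RiverFlowPathFinder helps with and is omitted here:

// Sketch: permutation -> disjoint cycles -> abstract swaps (graph unaware).
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // permutation[v] = the vertex where the token currently on v should end up.
  const std::vector<std::size_t> permutation{2, 0, 1, 4, 3, 5};

  std::vector<bool> seen(permutation.size(), false);
  std::vector<std::pair<std::size_t, std::size_t>> abstract_swaps;

  for (std::size_t start = 0; start < permutation.size(); ++start) {
    if (seen[start] || permutation[start] == start) continue;

    // (i) Collect the disjoint cycle through "start": v, permutation[v], ...
    std::vector<std::size_t> cycle;
    for (std::size_t v = start; !seen[v]; v = permutation[v]) {
      seen[v] = true;
      cycle.push_back(v);
    }

    // (ii) The cyclic shift v0->v1->...->vk->v0 becomes the abstract swaps
    // [v(k-1), vk], ..., [v1, v2], [v0, v1]: k swaps for a cycle of k+1 vertices.
    for (std::size_t i = cycle.size() - 1; i > 0; --i) {
      abstract_swaps.emplace_back(cycle[i - 1], cycle[i]);
    }
  }

  for (const auto& s : abstract_swaps) {
    std::cout << "[" << s.first << "," << s.second << "] ";
  }
  std::cout << '\n';  // prints: [2,1] [0,2] [3,4]
}

Applying those swaps in order does send every token to the vertex given by the permutation; the remaining work, and the interesting part, is routing each abstract swap through actual edges of the architecture.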
diff --git a/tket/src/TokenSwapping/SwapsFromQubitMapping.cpp b/tket/src/TokenSwapping/SwapsFromQubitMapping.cpp
new file mode 100644
index 0000000000..aa7ac3bd80
--- /dev/null
+++ b/tket/src/TokenSwapping/SwapsFromQubitMapping.cpp
@@ -0,0 +1,69 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "TokenSwapping/SwapsFromQubitMapping.hpp"
+
+#include
+#include
+
+#include "TokenSwapping/BestFullTsa.hpp"
+#include "TokenSwapping/VertexMappingFunctions.hpp"
+#include "Utils/Assert.hpp"
+
+namespace tket {
+
+using namespace tsa_internal;
+
+std::vector> get_swaps(
+    const Architecture& architecture, const NodeMapping& node_mapping) {
+  std::vector> swaps;
+  // Before all the conversion and object construction,
+  // doesn't take long to check if it's actually trivial
+  bool trivial = true;
+  for (const auto& entry : node_mapping) {
+    if (entry.first != entry.second) {
+      trivial = false;
+      break;
+    }
+  }
+  if (trivial) {
+    return swaps;
+  }
+  // Now convert the Nodes into raw vertices for use in TSA objects.
+  const ArchitectureMapping arch_mapping(architecture);
+  VertexMapping vertex_mapping;
+  for (const auto& node_entry : node_mapping) {
+    vertex_mapping[arch_mapping.get_vertex(node_entry.first)] =
+        arch_mapping.get_vertex(node_entry.second);
+  }
+  TKET_ASSERT(vertex_mapping.size() == node_mapping.size());
+  check_mapping(vertex_mapping);
+
+  SwapList raw_swap_list;
+  BestFullTsa().append_partial_solution(
+      raw_swap_list, vertex_mapping, arch_mapping);
+
+  // Finally, convert the raw swaps back to nodes.
+ swaps.reserve(raw_swap_list.size()); + for (auto id_opt = raw_swap_list.front_id(); id_opt; + id_opt = raw_swap_list.next(id_opt.value())) { + const auto& raw_swap = raw_swap_list.at(id_opt.value()); + swaps.emplace_back(std::make_pair( + arch_mapping.get_node(raw_swap.first), + arch_mapping.get_node(raw_swap.second))); + } + return swaps; +} + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp new file mode 100644 index 0000000000..b7a70cb286 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp @@ -0,0 +1,39 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include + +#include "Architecture/Architecture.hpp" + +namespace tket { + +/** This specifies desired source->target vertex mappings. + * Any nodes not occurring as a key might be moved by the algorithm. + */ +typedef std::map NodeMapping; + +/** Version 1.1, not too bad. + * @param architecture The raw object containing the graph. + * @param node_mapping The desired source->target node mapping. + * @return The required list of node pairs to swap. + */ +std::vector> get_swaps( + const Architecture& architecture, const NodeMapping& node_mapping); + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp deleted file mode 100644 index c5a776bf19..0000000000 --- a/tket/src/TokenSwapping/include/TokenSwapping/main_entry_functions.hpp +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include - -#include "Architecture/Architecture.hpp" -#include "Circuit/Circuit.hpp" - -namespace tket { - -/** This specifies desired source->target vertex mappings. - * Any nodes not occurring as a key might be moved by the algorithm. - */ -typedef std::map NodeMapping; - -/** Version 1.1, not too bad. - * @param architecture The raw object containing the graph. - * @param node_mapping The desired source->target node mapping. - * @return The required list of node pairs to swap. 
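A minimal usage sketch of the Node-level get_swaps declared in SwapsFromQubitMapping.hpp above (illustrative only; the Architecture edge-list constructor is an assumption here, as it is used in the tests but is not part of this patch):

// Illustrative sketch; the Architecture edge-list constructor is assumed.
#include <iostream>
#include <utility>
#include <vector>

#include "Architecture/Architecture.hpp"
#include "TokenSwapping/SwapsFromQubitMapping.hpp"

int main() {
  using namespace tket;
  // A path architecture 0 - 1 - 2 - 3.
  const std::vector<std::pair<unsigned, unsigned>> edges{{0, 1}, {1, 2}, {2, 3}};
  const Architecture architecture(edges);

  // Exchange the tokens on nodes 0 and 3; nodes 1 and 2 are unconstrained.
  NodeMapping node_mapping;
  node_mapping[Node(0)] = Node(3);
  node_mapping[Node(3)] = Node(0);

  for (const auto& swap : get_swaps(architecture, node_mapping)) {
    std::cout << swap.first.repr() << " <-> " << swap.second.repr() << std::endl;
  }
  return 0;
}

Each returned pair is an edge of the architecture, and applying the swaps in order moves the token on node 0 to node 3 and vice versa.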
- */ -std::vector> get_swaps( - const Architecture& architecture, const NodeMapping& node_mapping); - -/** An alternative interface, which just wraps the other "get_swaps" function. - * In the returned tuple, the Circuit implements using SWAP gates, - * and the unit_map_t objects are the initial and final mappings of - * logical qubits to architecture nodes. - * NOTE: the architecture may contain other nodes not mentioned in the - * input logical->physical maps, which may get moved. - * If you don't want this, you must include these nodes in the maps. - * @param architecture The architecture (containing nodes, and edges) - * @param initial_logical_to_physical_map The key is the initial logical qubit, - * the value is the existing physical node in the architecture - * which it currently maps to. - * @param desired_logical_to_physical_map The keys are the same logical qubits - * as in "initial_logical_to_physical_map", but the values are now - * the nodes where we want them to map AFTER the swaps. - * @return A circuit containing the swaps (SWAP gates only), plus the resultant - * logical to physical mappings before and after (necessarily the same as - * the input mappings, because the returned swaps should always result - * in the desired end-to-end mapping exactly). - */ -std::tuple get_swaps( - const Architecture& architecture, - const unit_map_t& initial_logical_to_physical_map, - const unit_map_t& desired_logical_to_physical_map); - -} // namespace tket diff --git a/tket/src/TokenSwapping/main_entry_functions.cpp b/tket/src/TokenSwapping/main_entry_functions.cpp deleted file mode 100644 index 12507f2463..0000000000 --- a/tket/src/TokenSwapping/main_entry_functions.cpp +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "main_entry_functions.hpp" - -#include -#include - -#include "BestFullTsa.hpp" -#include "Utils/Assert.hpp" -#include "VertexMappingFunctions.hpp" - -namespace tket { - -using namespace tsa_internal; - -std::vector> get_swaps( - const Architecture& architecture, const NodeMapping& node_mapping) { - std::vector> swaps; - // Before all the conversion and object construction, - // doesn't take long to check if it's actually trivial - bool trivial = true; - for (const auto& entry : node_mapping) { - if (entry.first != entry.second) { - trivial = false; - break; - } - } - if (trivial) { - return swaps; - } - // Now convert the Nodes into raw vertices for use in TSA objects. - const ArchitectureMapping arch_mapping(architecture); - VertexMapping vertex_mapping; - for (const auto& node_entry : node_mapping) { - vertex_mapping[arch_mapping.get_vertex(node_entry.first)] = - arch_mapping.get_vertex(node_entry.second); - } - TKET_ASSERT(vertex_mapping.size() == node_mapping.size()); - check_mapping(vertex_mapping); - - SwapList raw_swap_list; - BestFullTsa().append_partial_solution( - raw_swap_list, vertex_mapping, arch_mapping); - - // Finally, convert the raw swaps back to nodes. 
- swaps.reserve(raw_swap_list.size()); - for (auto id_opt = raw_swap_list.front_id(); id_opt; - id_opt = raw_swap_list.next(id_opt.value())) { - const auto& raw_swap = raw_swap_list.at(id_opt.value()); - swaps.emplace_back(std::make_pair( - arch_mapping.get_node(raw_swap.first), - arch_mapping.get_node(raw_swap.second))); - } - return swaps; -} - -// TODO: we really should add tests for this! -// GCOVR_EXCL_START -std::tuple get_swaps( - const Architecture& architecture, - const unit_map_t& initial_logical_to_physical_map, - const unit_map_t& desired_logical_to_physical_map) { - // The physical qubits are nodes inside the architecture. - // Some Node <--> UnitID conversion is unavoidable with the current design, - // since Architecture uses Node objects, rather than UnitID objects, - // and types like vector and vector cannot be converted - // to each other without copying, even though each Node is just - // a UnitID with no extra data (C++ containers are not "covariant"). - NodeMapping node_mapping; - for (const std::pair& initial_entry : - initial_logical_to_physical_map) { - const auto citer = - desired_logical_to_physical_map.find(initial_entry.first); - if (citer == desired_logical_to_physical_map.cend()) { - std::stringstream ss; - ss << "Logical qubit " << initial_entry.first.repr() - << " is present in the initial logical->physical map, but not in the " - "target logical->physical map"; - throw std::runtime_error(ss.str()); - } - const Node source_physical_node(initial_entry.second); - const Node target_physical_node(citer->second); - node_mapping[source_physical_node] = target_physical_node; - } - if (initial_logical_to_physical_map.size() != - desired_logical_to_physical_map.size()) { - std::stringstream ss; - ss << "Initial and final logical->physical mappings have different sizes " - << initial_logical_to_physical_map.size() << ", " - << desired_logical_to_physical_map.size() - << ". There are extra logical qubits in the final map missing from the " - "initial map"; - throw std::runtime_error(ss.str()); - } - if (node_mapping.size() != initial_logical_to_physical_map.size()) { - std::stringstream ss; - ss << "Converted " << initial_logical_to_physical_map.size() - << " distinct logical qubits to " << node_mapping.size() - << " distinct physical nodes; conversion error"; - throw std::runtime_error(ss.str()); - } - const auto node_swaps = get_swaps(architecture, node_mapping); - - // Don't add unused nodes to the final circuit. - std::set nodes_seen; - for (const auto& swap : node_swaps) { - nodes_seen.insert(swap.first); - nodes_seen.insert(swap.second); - } - - std::tuple result; - - // We rely on the algorithm to be correct, - // i.e. it really has calculated the full desired mapping. - // - // NOTE: other nodes in the architecture might be involved in the swaps, - // even if they were not mentioned in any of the input logical->physical maps. - // But that's OK; if the caller wants to keep them fixed, - // they should have put them into the input maps. - std::get<1>(result) = initial_logical_to_physical_map; - std::get<2>(result) = desired_logical_to_physical_map; - - for (const Node& node : nodes_seen) { - std::get<0>(result).add_qubit(node); - } - // Now we can add the swaps. 
- for (const auto& swap : node_swaps) { - std::get<0>(result).add_op(OpType::SWAP, {swap.first, swap.second}); - } - return result; -} -// GCOVR_EXCL_STOP - -} // namespace tket diff --git a/tket/tests/TokenSwapping/test_main_entry_functions.cpp b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp similarity index 96% rename from tket/tests/TokenSwapping/test_main_entry_functions.cpp rename to tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp index 59d31a27de..91d2480e46 100644 --- a/tket/tests/TokenSwapping/test_main_entry_functions.cpp +++ b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp @@ -15,8 +15,8 @@ #include #include -#include "TokenSwapping/RNG.hpp" -#include "TokenSwapping/main_entry_functions.hpp" +#include "Utils/RNG.hpp" +#include "TokenSwapping/SwapsFromQubitMapping.hpp" using std::vector; @@ -24,10 +24,9 @@ using std::vector; // are done elsewhere, so this is really just checking conversion. namespace tket { -namespace tsa_internal { namespace tests { -SCENARIO("main entry function for TSA") { +SCENARIO("get_swaps : swaps returned directly from architecture") { // Will summarise relevant data, so that we can see any changes. std::stringstream problem_ss; @@ -118,6 +117,6 @@ SCENARIO("main entry function for TSA") { REQUIRE(nodes_copy == node_final_positions); } + } // namespace tests -} // namespace tsa_internal } // namespace tket From 07aa0ed7ae2aafa4f907e59cf7f0abde5bab3f7b Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 20:11:07 +0000 Subject: [PATCH 076/146] move RNG from token swapping to Utils; erase tests/Graphs/RNG --- tket/src/Utils/CMakeLists.txt | 1 + tket/src/{TokenSwapping => Utils}/RNG.cpp | 2 +- .../include/Utils}/RNG.hpp | 13 +- tket/tests/Graphs/EdgeSequence.hpp | 2 +- tket/tests/Graphs/RNG.cpp | 167 ------------------ tket/tests/Graphs/RNG.hpp | 163 ----------------- tket/tests/Graphs/RandomGraphGeneration.cpp | 2 +- tket/tests/Graphs/RandomPlanarGraphs.cpp | 2 +- tket/tests/Graphs/RandomPlanarGraphs.hpp | 2 +- tket/tests/Graphs/test_GraphColouring.cpp | 2 +- .../tests/Graphs/test_GraphFindComponents.cpp | 2 +- tket/tests/Graphs/test_GraphFindMaxClique.cpp | 2 +- .../TableLookup/test_CanonicalRelabelling.cpp | 2 +- .../test_FilteredSwapSequences.cpp | 2 +- .../TestUtils/FullTsaTesting.hpp | 2 +- .../TestUtils/PartialTsaTesting.hpp | 2 +- .../TestUtils/ProblemGeneration.hpp | 2 +- .../test_RiverFlowPathFinder.cpp | 2 +- .../TokenSwapping/test_SwapListOptimiser.cpp | 2 +- .../TokenSwapping/test_VariousPartialTsa.cpp | 2 +- .../TokenSwapping/test_VectorListHybrid.cpp | 2 +- .../test_VectorListHybridSkeleton.cpp | 2 +- tket/tests/{Graphs => Utils}/test_RNG.cpp | 6 +- tket/tests/tkettestsfiles.cmake | 6 +- 24 files changed, 27 insertions(+), 365 deletions(-) rename tket/src/{TokenSwapping => Utils}/RNG.cpp (99%) rename tket/src/{TokenSwapping/include/TokenSwapping => Utils/include/Utils}/RNG.hpp (93%) delete mode 100644 tket/tests/Graphs/RNG.cpp delete mode 100644 tket/tests/Graphs/RNG.hpp rename tket/tests/{Graphs => Utils}/test_RNG.cpp (97%) diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index c2c734093a..16ec28d8d2 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -24,6 +24,7 @@ add_library(tket-${COMP} HelperFunctions.cpp MatrixAnalysis.cpp PauliStrings.cpp + RNG.cpp CosSinDecomposition.cpp Expression.cpp) diff --git a/tket/src/TokenSwapping/RNG.cpp b/tket/src/Utils/RNG.cpp similarity index 99% rename from tket/src/TokenSwapping/RNG.cpp rename to 
tket/src/Utils/RNG.cpp index daa3c05e4a..506d6237db 100644 --- a/tket/src/TokenSwapping/RNG.cpp +++ b/tket/src/Utils/RNG.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "RNG.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp b/tket/src/Utils/include/Utils/RNG.hpp similarity index 93% rename from tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp rename to tket/src/Utils/include/Utils/RNG.hpp index 99f93d0e2b..7b3c17acec 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/RNG.hpp +++ b/tket/src/Utils/include/Utils/RNG.hpp @@ -38,9 +38,7 @@ namespace tket { // even for something as simple as a uniform distribution. // The same applies to, e.g., std::random_shuffle. -/** - * TODO: move this, once decided where (I would prefer Utils). - * A random number generator class. +/** A random number generator class. * Of course, this is only for random test data generation, * definitely NOT suitable for any kind of cryptography! * Note that there are no functions involving doubles anywhere! @@ -74,12 +72,9 @@ class RNG { size_t get_size_t(size_t min_value, size_t max_value); /** - * I believe that the behaviour on the Mersenne twister random engine - * is guaranteed by the C++ standard, although I'm not 100% sure - * (but it seems to work in tests). - * The standard specifies 5489u as the default initial seed, so it would - * be rather pointless to do that if the bits generated - * were still implementation-dependent. + * The behaviour of the RAW BITS of the Mersenne twister random engine + * is guaranteed by the C++ standard. + * The standard specifies 5489u as the default initial seed. * @param seed A seed value, to alter the RNG state. * By default, uses the value specified by the standard. */ diff --git a/tket/tests/Graphs/EdgeSequence.hpp b/tket/tests/Graphs/EdgeSequence.hpp index 9df6bf3458..05c88a9617 100644 --- a/tket/tests/Graphs/EdgeSequence.hpp +++ b/tket/tests/Graphs/EdgeSequence.hpp @@ -18,7 +18,7 @@ #include #include -#include "RNG.hpp" +#include "Utils/RNG.hpp" namespace tket { namespace graphs { diff --git a/tket/tests/Graphs/RNG.cpp b/tket/tests/Graphs/RNG.cpp deleted file mode 100644 index 1ee6fceff5..0000000000 --- a/tket/tests/Graphs/RNG.cpp +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "RNG.hpp" - -using std::size_t; -using std::vector; - -namespace tket { -namespace graphs { -namespace tests { - -size_t RNG::get_size_t(size_t max_value) { - if (max_value == 0) { - return 0; - } - // Raw data; now must convert to a value to return! - const std::uint64_t random_int = m_engine(); - - if (max_value > m_engine.max() / 4) { - // If choosing such a large potential number of values, - // the bias will unavoidably be very bad, - // if only generating a single random int. 
- // Surely no deterministic function - // f : {0,1,...,N} -> {0,1,...,M} - // can be close to giving a uniform distribution, - // if N != M are both large and nearly equal. - // (Should be a theorem in here somewhere!) - if (max_value >= m_engine.max()) { - // Care! Maybe max_value+1 == 0 by wraparound, - // so we cannot do division by max_value+1 ! - return random_int; - } - return random_int % (max_value + 1); - } - - // NOW we know that max_value+1 won't overflow. - - // Mathematical note on the below: let: - // m = maximum possible value of "random_int" - // w = interval_width - // v = max possible value to return. - // - // Thus, random_int could be one of {0,1,2,...,m}, - // and we must return one of {0,1,2,...,v}. - // - // With int arithmetic, we get w = int((m+1)/(v+1)). - // - // e.g., if m=5, v=2, then w = int(6/3) = 2, - // the possible random_int values are {0,1,2,3,4,5}, - // and this is partitioned into 3 sets: - // {0,1}, {2,3}, {4,5}. - // - // [Since, with int arithmetic, - // int(0/2) = int(1/2) = 0, int(2/2) = int(3/2) = 1, ...] - // - // Because these sets have equal size 2, each of the values 0,1,2 - // has equal probability 1/3 of being returned. - // BUT, what if (m+1)/(v+1) is not an integer? - // - // e.g., m=5, v=3. - // Now, we must partition the set {0,1,2,3,4,5} into 4 subsets. - // With the below algorithm, w=int((5+1)/(3+1)) = 1, so the partition is - // {0}, {1}, {2}, {3,4,5}. - // Notice that 0,1,2 have probabilities 1/6 of being returned, - // but v=3 has probability 3/6 of being returned, quite a large bias. - // - // How bad can it be? In general: - // - // (m+1)/(v+1) - 1 < w <= (m+1)/(v+1). - // Thus - // m-v+1 <= w(v+1) <= m+1. - // - // Now, the random_int sets causing the values 0,1,...,v to be returned are - // - // { 0, 1, ..., w-1} --> returns 0 - // { w, w+1, ..., 2w-1} --> returns 1 - // {2w, 2w+1, ..., 3w-1} --> returns 2 - // .... - // {vw, vw+1, ..., (v+1)w - 1, ... , m } --> returns v - // - // Notice that all sets except the last have size w. - // The last set has size m-vw+1. So, the final value v has - // more ways than the other values 0,1,... to be returned, by a factor of - // - // U = (m-vw+1)/w = (m+1)/w - v. - // - // U is the "bias factor" which we want to be as close to 1 as possible. - // Always, U >= (m+1)/[(m+1)/(v+1)] - v = v+1-v = 1, - // as we already know. Also, - // - // U <= (m+1)(v+1)/(m-v+1) - v. - // - // Let's assume that v << m. - // Then we can expand with a geometric series: - // (m+1)(v+1)/(m-v+1) = (v+1).[1-v/(m+1)]^{-1} - // = (v+1).[1 + v/(m+1) + A] - // = v+1 + v(v+1)/(m+1) + (v+1)A, - // - // where A ~ (v/m)^2, with ~ here meaning - // "roughly equal size, up to constant factors". - // Thus, U <= 1 + v(v+1)/(m+1) + (v+1)A. - // - // So, finally, assume also that v(v+1) << m+1. - // [This is the same as saying v^2 << m, since m = 2^64-1 is very large, - // and thus m+1~m, sqrt(m+1)~sqrt(m)]. - // - // ...then: A ~ (v^2/m) / m << 1/m, (v+1)A << v/m << 1, - // and so U = 1 + C where C << 1. - // - // Thus, the bias towards the max value v is negligible, as required. - - // Divide range into approximately equal widths. - // Notice, we can't do m_engine.max()+1 because it overflows to 0. - // But the chance of getting m_engine.max() is negligibly small anyway. - const std::uint64_t interval_width = - m_engine.max() / - // Doesn't overflow, because of the above checks. - (static_cast(max_value) + 1); - - // interval_width cannot be zero, because we ensured above that - // max_value + 1 <= m_engine.max(). 
- const size_t result = random_int / interval_width; - - // Modulo arithmetic shouldn't be necessary, but be paranoid, - // in case there are mistakes in the above analysis (very likely!) - return result % (max_value + 1); -} - -size_t RNG::get_size_t(size_t min_value, size_t max_value) { - if (min_value > max_value) { - std::swap(min_value, max_value); - } - return min_value + get_size_t(max_value - min_value); -} - -vector RNG::get_permutation(size_t size) { - vector numbers(size); - for (size_t i = 0; i < size; ++i) { - numbers[i] = i; - } - do_shuffle(numbers); - return numbers; -} - -void RNG::set_seed(size_t seed) { m_engine.seed(seed); } - -bool RNG::check_percentage(size_t percentage) { - // e.g. the numbers {0,1,2,3,4} are 5% - // of the numbers {0,1,...,99}. - return get_size_t(99) < percentage; -} - -} // namespace tests -} // namespace graphs -} // namespace tket diff --git a/tket/tests/Graphs/RNG.hpp b/tket/tests/Graphs/RNG.hpp deleted file mode 100644 index c7b6707731..0000000000 --- a/tket/tests/Graphs/RNG.hpp +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include - -namespace tket { -namespace graphs { -namespace tests { - -// Something like this is needed for proper random test data generation -// if you want to be platform-independent, as the C++ standard is stupid. -// (A major scandal, in my opinion). -// The random engines are mostly guaranteed by the standard, -// but the DISTRIBUTIONS, e.g. uniform_distribution, are NOT -// (i.e., the actual algorithm used to convert a string of bits to a number -// in the range {0,1,2,...,N} is not specified at all by the C++ standard). -// Thus, we are NOT guaranteed to get the same results, even with the same -// (1) engine; (2) initial seed; (3) distribution, -// if we use different platforms (or even different compilers -// on the SAME platform), or even different compiler VERSIONS!!! -// -// The C++ standard as far as I know does not specify ANY distribution -// implementations, not even optionally, so you HAVE to do this yourself, -// even for something as simple as a uniform distribution. -// The same applies to, e.g., std::random_shuffle. - -/** - * TODO: move this to a better place, once decided where. - * A random number generator class. - * Of course, this is only for random test data generation, - * definitely NOT suitable for any kind of cryptography! - * Note that there are no functions involving doubles anywhere! - * Actually, double calculations can give very slightly different answers - * across platforms, compilers, compiler optimisation settings; - * the numerical difference is absolutely negligible, - * but it's worth being ultra cautious! - */ -class RNG { - public: - /** - * Return a random integer from 0 to N, inclusive. - * Approximately uniform, if max_value is much less than - * the max possible value that can be returned. 
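To see the portability point in isolation (a standalone sketch, not tket code): the raw engine output is specified exactly by the C++ standard, whereas the reduction performed by a standard-library distribution is not, which is why the RNG class reduces the raw bits itself:

// Illustrative sketch; not part of the tket sources.
#include <cstdint>
#include <iostream>
#include <random>

int main() {
  std::mt19937_64 engine;  // default seed 5489u; its output sequence is fixed by the standard
  const std::uint64_t raw = engine();  // identical on every conforming implementation

  // Reduce the raw bits ourselves, in the spirit of RNG::get_size_t:
  // divide the engine's range into roughly equal intervals, one per desired value.
  const std::uint64_t interval_width = engine.max() / 100;
  const std::uint64_t portable_value = (raw / interval_width) % 100;  // in {0,...,99}

  // By contrast, the value below is NOT guaranteed to be the same everywhere,
  // because the reduction algorithm used by uniform_int_distribution is left
  // to each standard library implementation.
  std::uniform_int_distribution<std::uint64_t> dist(0, 99);
  const std::uint64_t unportable_value = dist(engine);

  std::cout << raw << " " << portable_value << " " << unportable_value << std::endl;
  return 0;
}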
- * N << sqrt(max uint64) ~ 2^32 ~ 4e9 will work well. - * See the comments in the cpp file implementation for more detail. - * - * @param max_value The value N which is the (inclusive) maximum value - * which can be returned. - * @return A size_t from the inclusive range {0,1,2,...,N}. - */ - std::size_t get_size_t(std::size_t max_value); - - /** - * Returns a number in the inclusive interval, including the endpoints. - * - * @return A size_t from the inclusive range {a, a+1, a+2, ... , b}. - */ - std::size_t get_size_t(std::size_t min_value, std::size_t max_value); - - /** - * I believe that the behaviour on the Mersenne twister random engine - * is guaranteed by the C++ standard, although I'm not 100% sure. - * The standard specifies 5489u as the default initial seed, so it would - * be rather pointless to do that if the bits generated - * were still implementation-dependent. - */ - void set_seed(std::size_t seed); - - /** Return true p% of the time. - * (Very quick and dirty, doesn't check for, e.g., 110% effort...) - * As mentioned above, we deliberately DON'T have a function returning - * a uniform double. Sticking to integer values is safest. - */ - bool check_percentage(std::size_t percentage); - - /** - * Simply shuffle the elements around at random. - * Approximately uniform over all possible permutations. - * This is necessary because C++ random_shuffle is - * implementation-dependent (see above comments). - */ - template - void do_shuffle(std::vector& elements) { - if (elements.size() < 2) { - return; - } - m_shuffling_data.resize(elements.size()); - for (std::size_t i = 0; i < m_shuffling_data.size(); ++i) { - m_shuffling_data[i].first = m_engine(); - m_shuffling_data[i].second = i; - } - std::sort( - m_shuffling_data.begin(), m_shuffling_data.end(), - [](const std::pair& lhs, - const std::pair& rhs) { - return lhs.first < rhs.first || - (lhs.first == rhs.first && lhs.second < rhs.second); - }); - // Don't need to make a copy of "elements"! Just do repeated swaps... - for (std::size_t i = 0; i < m_shuffling_data.size(); ++i) { - const std::size_t& j = m_shuffling_data[i].second; - if (i != j) { - std::swap(elements[i], elements[j]); - } - } - } - - /** Return a random element from the vector. */ - template - const T& get_element(const std::vector& elements) { - if (elements.empty()) { - throw std::runtime_error("RNG: get_element called on empty vector"); - } - return elements[get_size_t(elements.size() - 1)]; - } - - /** - * Pick out a random element from the vector, copy and return it, - * but also remove that element from the vector (swapping with - * the back for efficiency, i.e. the ordering changes). - */ - template - T get_and_remove_element(std::vector& elements) { - if (elements.empty()) { - throw std::runtime_error( - "RNG: get_and_remove_element called on empty vector"); - } - std::size_t index = get_size_t(elements.size() - 1); - const T copy = elements[index]; - elements[index] = elements.back(); - elements.pop_back(); - return copy; - } - - /** Returns the numbers {0,1,2,...,N-1} in some random order. */ - std::vector get_permutation(std::size_t size); - - private: - std::mt19937_64 m_engine; - - // Avoids repeated memory reallocation. 
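The do_shuffle member above uses the same trick to obtain a platform-independent shuffle: pair each element with a raw engine value and sort the pairs. In isolation the idea looks like this (a sketch only, not the tket implementation):

// Illustrative sketch of a portable shuffle via sorting on raw engine values.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <random>
#include <utility>
#include <vector>

int main() {
  std::mt19937_64 engine;
  const std::vector<char> elements{'a', 'b', 'c', 'd', 'e'};

  // Attach a fresh raw 64-bit value to every index...
  std::vector<std::pair<std::uint64_t, std::size_t>> keys(elements.size());
  for (std::size_t i = 0; i < keys.size(); ++i) {
    keys[i] = {engine(), i};
  }
  // ...and sort the pairs; ties fall back to the original index, so the result
  // depends only on the engine output and is therefore the same on all platforms.
  std::sort(keys.begin(), keys.end());

  for (const auto& key : keys) {
    std::cout << elements[key.second];
  }
  std::cout << std::endl;
  return 0;
}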
- std::vector> m_shuffling_data; -}; - -} // namespace tests -} // namespace graphs -} // namespace tket diff --git a/tket/tests/Graphs/RandomGraphGeneration.cpp b/tket/tests/Graphs/RandomGraphGeneration.cpp index 929552c6c0..45c2923d68 100644 --- a/tket/tests/Graphs/RandomGraphGeneration.cpp +++ b/tket/tests/Graphs/RandomGraphGeneration.cpp @@ -19,7 +19,7 @@ #include "EdgeSequence.hpp" #include "Graphs/AdjacencyData.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/Graphs/RandomPlanarGraphs.cpp b/tket/tests/Graphs/RandomPlanarGraphs.cpp index 3ea82b13c8..406e6fc01d 100644 --- a/tket/tests/Graphs/RandomPlanarGraphs.cpp +++ b/tket/tests/Graphs/RandomPlanarGraphs.cpp @@ -16,7 +16,7 @@ #include -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/Graphs/RandomPlanarGraphs.hpp b/tket/tests/Graphs/RandomPlanarGraphs.hpp index 707e30ec2b..b29b159ad2 100644 --- a/tket/tests/Graphs/RandomPlanarGraphs.hpp +++ b/tket/tests/Graphs/RandomPlanarGraphs.hpp @@ -18,7 +18,7 @@ #include #include -#include "RNG.hpp" +#include "Utils/RNG.hpp" namespace tket { namespace graphs { diff --git a/tket/tests/Graphs/test_GraphColouring.cpp b/tket/tests/Graphs/test_GraphColouring.cpp index 5c3b22f3df..18b4016122 100644 --- a/tket/tests/Graphs/test_GraphColouring.cpp +++ b/tket/tests/Graphs/test_GraphColouring.cpp @@ -21,7 +21,7 @@ #include "Graphs/GraphColouring.hpp" #include "RandomGraphGeneration.hpp" #include "RandomPlanarGraphs.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::map; using std::vector; diff --git a/tket/tests/Graphs/test_GraphFindComponents.cpp b/tket/tests/Graphs/test_GraphFindComponents.cpp index 505d97d57b..059f3d8ff0 100644 --- a/tket/tests/Graphs/test_GraphFindComponents.cpp +++ b/tket/tests/Graphs/test_GraphFindComponents.cpp @@ -17,7 +17,7 @@ #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphRoutines.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::map; using std::set; diff --git a/tket/tests/Graphs/test_GraphFindMaxClique.cpp b/tket/tests/Graphs/test_GraphFindMaxClique.cpp index 8da88d68d7..38c1b00d01 100644 --- a/tket/tests/Graphs/test_GraphFindMaxClique.cpp +++ b/tket/tests/Graphs/test_GraphFindMaxClique.cpp @@ -18,7 +18,7 @@ #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphRoutines.hpp" #include "Graphs/LargeCliquesResult.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::set; using std::vector; diff --git a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp index 6e0fa8f2da..024b09862e 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp @@ -19,7 +19,7 @@ #include "PermutationTestUtils.hpp" #include "TokenSwapping/CanonicalRelabelling.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp index 3af1da4756..856048668c 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp @@ -17,7 +17,7 @@ #include #include "TokenSwapping/FilteredSwapSequences.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::vector; diff 
--git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index bb93aa43e2..d85543a0aa 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -16,8 +16,8 @@ #include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" -#include "TokenSwapping/RNG.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" +#include "Utils/RNG.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index 1d08b223c2..b494afe52d 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -16,7 +16,7 @@ #include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp index 2e81c5c82b..a84e41ad42 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -15,8 +15,8 @@ #pragma once #include "Architecture/Architecture.hpp" -#include "TokenSwapping/RNG.hpp" #include "TokenSwapping/VertexMappingFunctions.hpp" +#include "Utils/RNG.hpp" namespace tket { namespace tsa_internal { diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 446fc39e74..3f11048db8 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -19,7 +19,7 @@ #include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp index 2727651427..a68d2664f3 100644 --- a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -18,7 +18,7 @@ #include #include "TestUtils/DebugFunctions.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp index 80fcc185e0..92e222d527 100644 --- a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -19,7 +19,7 @@ #include "TestUtils/ProblemGeneration.hpp" #include "TokenSwapping/CyclesPartialTsa.hpp" #include "TestUtils/DebugFunctions.hpp" -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/TrivialTSA.hpp" diff --git a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp index 9ae0d81a4f..ca84d7e741 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp @@ -16,7 +16,7 @@ #include #include -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" #include 
"TokenSwapping/VectorListHybrid.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp index 57e09601f9..bf0d0de722 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp @@ -20,7 +20,7 @@ #include #include -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" #include "TokenSwapping/VectorListHybridSkeleton.hpp" using std::vector; diff --git a/tket/tests/Graphs/test_RNG.cpp b/tket/tests/Utils/test_RNG.cpp similarity index 97% rename from tket/tests/Graphs/test_RNG.cpp rename to tket/tests/Utils/test_RNG.cpp index 480e9cb456..1b75d568af 100644 --- a/tket/tests/Graphs/test_RNG.cpp +++ b/tket/tests/Utils/test_RNG.cpp @@ -18,14 +18,12 @@ #include #include -#include "TokenSwapping/RNG.hpp" +#include "Utils/RNG.hpp" using std::stringstream; using std::vector; namespace tket { -namespace graphs { -namespace tests { // Check that the RNG really is identical across all platforms. @@ -144,6 +142,4 @@ SCENARIO("RNG: permutations") { " 69 24 68 71 64 84 36 65 97 98 52 45 ]"); } -} // namespace tests -} // namespace graphs } // namespace tket diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index d5ed8fcc20..0c376130f4 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -25,16 +25,15 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/CircuitsForTesting.cpp ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp ${TKET_TESTS_DIR}/Utils/test_CosSinDecomposition.cpp + ${TKET_TESTS_DIR}/Utils/test_RNG.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp ${TKET_TESTS_DIR}/Graphs/GraphTestingRoutines.cpp ${TKET_TESTS_DIR}/Graphs/RandomGraphGeneration.cpp ${TKET_TESTS_DIR}/Graphs/RandomPlanarGraphs.cpp - ${TKET_TESTS_DIR}/Graphs/RNG.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphColouring.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphFindComponents.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphFindMaxClique.cpp - ${TKET_TESTS_DIR}/Graphs/test_RNG.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphUtils.cpp ${TKET_TESTS_DIR}/Graphs/test_DirectedGraph.cpp ${TKET_TESTS_DIR}/Graphs/test_ArticulationPoints.cpp @@ -54,6 +53,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DebugFunctions.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DecodedProblemData.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/FullTsaTesting.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/GetRandomSet.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ProblemGeneration.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/TestStatsStructs.cpp @@ -62,8 +62,8 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/TokenSwapping/test_BestTsaFixedSwapSequences.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_DistancesFromArchitecture.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_FullTsa.cpp - ${TKET_TESTS_DIR}/TokenSwapping/test_main_entry_functions.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_RiverFlowPathFinder.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_SwapsFromQubitMapping.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_SwapList.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_SwapListOptimiser.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_VariousPartialTsa.cpp From f0654477fcaa8c1ce422a3eec7b45b64a31cf852 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 20:13:12 +0000 Subject: [PATCH 077/146] move get_random_set out of token swapping into test utils --- 
.../TokenSwapping/GeneralFunctions.hpp | 20 +--------- .../TokenSwapping/TestUtils/GetRandomSet.cpp} | 11 +++--- .../TokenSwapping/TestUtils/GetRandomSet.hpp | 37 +++++++++++++++++++ .../TestUtils/ProblemGeneration.cpp | 1 + 4 files changed, 45 insertions(+), 24 deletions(-) rename tket/{src/TokenSwapping/TSAUtils/GeneralFunctions.cpp => tests/TokenSwapping/TestUtils/GetRandomSet.cpp} (87%) create mode 100644 tket/tests/TokenSwapping/TestUtils/GetRandomSet.hpp diff --git a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp index e4abcaae33..6d6ade36bc 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp @@ -21,9 +21,8 @@ #include #include #include -#include -#include "TokenSwapping/RNG.hpp" +#include "Utils/Assert.hpp" namespace tket { namespace tsa_internal { @@ -46,20 +45,15 @@ std::optional get_optional_value(const std::map& map, const K& key) { /** The key->value mapping is required to be bijective (reversible). * @param map The std::map object. * @return Another std::map, with the key->value mappings reversed. - * Throws if the map is not reversible. */ template std::map get_reversed_map(const std::map& map) { - // GCOVR_EXCL_START std::map reversed_map; for (const auto& entry : map) { reversed_map[entry.second] = entry.first; } - if (map.size() != reversed_map.size()) { - throw std::runtime_error("get_reversed_map called with non-reversible map"); - } + TKET_ASSERT(map.size() == reversed_map.size()); return reversed_map; - // GCOVR_EXCL_STOP } /** Finds the rightmost "one" (least significant bit) @@ -87,15 +81,5 @@ static UINT get_rightmost_bit(UINT& x) { return bit; } -/** Return a random subset of given size from the population {0,1,2,...,N}. - * @param rng A random number generator. - * @param sample_size The desired size of the returned set. - * @param population_size The number of elements in the population (an interval - * of nonnegative integers, starting at 0). - * @return A set of numbers. Throws upon invalid parameters. - */ -std::set get_random_set( - RNG& rng, size_t sample_size, size_t population_size); - } // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.cpp similarity index 87% rename from tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp rename to tket/tests/TokenSwapping/TestUtils/GetRandomSet.cpp index 97041c0131..796cdfc6e7 100644 --- a/tket/src/TokenSwapping/TSAUtils/GeneralFunctions.cpp +++ b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.cpp @@ -12,22 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "TokenSwapping/GeneralFunctions.hpp" +#include "GetRandomSet.hpp" #include -#include #include "Utils/Assert.hpp" namespace tket { namespace tsa_internal { +namespace tests { std::set get_random_set( RNG& rng, size_t sample_size, size_t population_size) { - // GCOVR_EXCL_START - TKET_ASSERT( - sample_size <= population_size || !"get_random_set: sample too large"); - // GCOVR_EXCL_STOP + TKET_ASSERT(sample_size <= population_size); + std::set result; if (sample_size == 0 || population_size == 0) { return result; @@ -51,5 +49,6 @@ std::set get_random_set( return result; } +} // namespace tests } // namespace tsa_internal } // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/GetRandomSet.hpp b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.hpp new file mode 100644 index 0000000000..1215a92657 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.hpp @@ -0,0 +1,37 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "Utils/RNG.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Return a random subset of given size from the population {0,1,2,...,N}. + * @param rng A random number generator. + * @param sample_size The desired size of the returned set. + * @param population_size The number of elements in the population (an interval + * of nonnegative integers, starting at 0). + * @return A set of numbers. Throws upon invalid parameters. 
+ */ +std::set get_random_set( + RNG& rng, size_t sample_size, size_t population_size); + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp index 2f6da5ceeb..2e2012aeae 100644 --- a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -16,6 +16,7 @@ #include +#include "GetRandomSet.hpp" #include "TokenSwapping/GeneralFunctions.hpp" using std::vector; From 34507b62e3e00f03bc201bf01b5b2610bd2e1a59 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 20:16:39 +0000 Subject: [PATCH 078/146] move some stuff out of namespace tsa_internal into namespace tket --- .../src/TokenSwapping/ArchitectureMapping.cpp | 2 - tket/src/TokenSwapping/BestFullTsa.cpp | 6 +-- tket/src/TokenSwapping/CMakeLists.txt | 8 +-- .../TokenSwapping/TSAUtils/SwapFunctions.cpp | 2 - .../TSAUtils/VertexMappingFunctions.cpp | 4 +- .../TokenSwapping/ArchitectureMapping.hpp | 2 - .../include/TokenSwapping/BestFullTsa.hpp | 54 +++++++++---------- .../include/TokenSwapping/SwapFunctions.hpp | 2 - .../TokenSwapping/VectorListHybrid.hpp | 12 ++--- .../TokenSwapping/VertexMappingFunctions.hpp | 2 - 10 files changed, 38 insertions(+), 56 deletions(-) diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwapping/ArchitectureMapping.cpp index 8011d3917a..a0aa287022 100644 --- a/tket/src/TokenSwapping/ArchitectureMapping.cpp +++ b/tket/src/TokenSwapping/ArchitectureMapping.cpp @@ -20,7 +20,6 @@ #include "Utils/Assert.hpp" namespace tket { -namespace tsa_internal { ArchitectureMapping::ArchitectureMapping(const Architecture& arch) : m_arch(arch) { @@ -131,5 +130,4 @@ std::vector ArchitectureMapping::get_edges() const { return edges; } -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp index fafce59fca..f0f81ddbae 100644 --- a/tket/src/TokenSwapping/BestFullTsa.cpp +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "BestFullTsa.hpp" +#include "TokenSwapping/BestFullTsa.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" @@ -20,7 +20,8 @@ #include "TokenSwapping/VertexMapResizing.hpp" namespace tket { -namespace tsa_internal { + +using namespace tsa_internal; BestFullTsa::BestFullTsa() { m_name = "BestFullTsa"; } @@ -60,5 +61,4 @@ void BestFullTsa::append_partial_solution( m_swap_list_optimiser); } -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt index 14e74dacac..04d2898ca6 100644 --- a/tket/src/TokenSwapping/CMakeLists.txt +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -29,17 +29,15 @@ add_library(tket-${COMP} DistancesInterface.cpp DynamicTokenTracker.cpp HybridTsa.cpp - main_entry_functions.cpp NeighboursFromArchitecture.cpp NeighboursInterface.cpp PartialTsaInterface.cpp RiverFlowPathFinder.cpp - RNG.cpp SwapListOptimiser.cpp + SwapsFromQubitMapping.cpp TrivialTSA.cpp VectorListHybridSkeleton.cpp TSAUtils/DistanceFunctions.cpp - TSAUtils/GeneralFunctions.cpp TSAUtils/SwapFunctions.cpp TSAUtils/VertexMappingFunctions.cpp TSAUtils/VertexSwapResult.cpp @@ -56,11 +54,7 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Architecture - Circuit - Gate Graphs - Ops - OpType Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp index 870e51ed75..108cb9339f 100644 --- a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp @@ -18,7 +18,6 @@ #include namespace tket { -namespace tsa_internal { Swap get_swap(size_t v1, size_t v2) { if (v1 == v2) { @@ -37,5 +36,4 @@ bool disjoint(const Swap& s1, const Swap& s2) { s1.second != s2.first && s1.second != s2.second; } -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index c5445774cd..9b67ca2aae 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -21,7 +21,8 @@ #include "Utils/Assert.hpp" namespace tket { -namespace tsa_internal { + +using namespace tsa_internal; bool all_tokens_home(const VertexMapping& vertex_mapping) { for (const auto& entry : vertex_mapping) { @@ -89,5 +90,4 @@ void add_swap(VertexMapping& source_to_target_map, const Swap& swap) { std::swap(source_to_target_map[source_v1], source_to_target_map[source_v2]); } -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp index 44f006a555..eecfc5bd92 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp @@ -18,7 +18,6 @@ #include "TokenSwapping/SwapFunctions.hpp" namespace tket { -namespace tsa_internal { /** For mapping between nodes in an architecture and size_t vertex numbers. 
* The vertex numbers are merely the indices of each Node @@ -105,5 +104,4 @@ class ArchitectureMapping { std::map m_node_to_vertex_mapping; }; -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp index ee531499ff..14cd96d597 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp @@ -16,22 +16,35 @@ #include "ArchitectureMapping.hpp" #include "HybridTsa.hpp" -#include "RNG.hpp" #include "SwapListOptimiser.hpp" -#include "TokenSwapping/SwapListTableOptimiser.hpp" +#include "SwapListTableOptimiser.hpp" +#include "Utils/RNG.hpp" namespace tket { -namespace tsa_internal { -/** To enable easier experimentation, keep this up-to-date with the best - * end-to-end known default options, but also make it possible to change - * the options. - * Also include the best known postprocessing swap list optimisations. +/** This class combines all the different token swapping components together + * in the best known way to get the best overall end-to-end routine + * (including different heuristics, parameters etc. whose optimal values + * are unknown, and require experimentation). */ -class BestFullTsa : public PartialTsaInterface { +class BestFullTsa : public tsa_internal::PartialTsaInterface { public: BestFullTsa(); + /** The main entry function. Given the desired vertex mapping, a list + * of swaps (which may or may not be empty), and information about + * the architecture (the underlying graph), append extra swaps to it + * to produce the desired mapping. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping. Will be updated with + * the new added swaps. + * @param arch_mapping An ArchitectureMapping object, which knows the graph, + * and how to do Node <-> vertex size_t conversions. + */ + void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + const ArchitectureMapping& arch_mapping); + /** We emphasise that, unlike the general PartialTsaInterface, the solution * returned is complete, AND includes all known swap list optimisations. * Warning: unlike most PartialTsaInterface objects, the vertex_mapping @@ -48,28 +61,15 @@ class BestFullTsa : public PartialTsaInterface { */ virtual void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, - DistancesInterface& distances, NeighboursInterface& neighbours, - RiverFlowPathFinder& path_finder) override; - - /** Wrapper around the main append_partial_solution function, but constructing - * and using the best known RiverFlowPathFinder object. The DistancesInterface - * and NeighboursInterface objects will automatically be constructed. - * @param swaps The list of swaps to append to. - * @param vertex_mapping The current desired mapping. Will be updated with - * the new added swaps. - * @param arch_mapping An ArchitectureMapping object, which knows the graph, - * how to do Node <-> vertex size_t conversions, etc. 
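A sketch of how this wrapper overload is driven, mirroring its use in SwapsFromQubitMapping.cpp earlier in this patch (illustrative only; the Architecture edge-list constructor is an assumption):

// Illustrative sketch; mirrors the call made in SwapsFromQubitMapping.cpp.
#include <utility>
#include <vector>

#include "TokenSwapping/BestFullTsa.hpp"

void example() {
  using namespace tket;
  const std::vector<std::pair<unsigned, unsigned>> edges{{0, 1}, {1, 2}, {2, 3}};
  const Architecture architecture(edges);
  const ArchitectureMapping arch_mapping(architecture);

  VertexMapping vertex_mapping{{0, 3}, {3, 0}};  // tokens on vertices 0 and 3 trade places
  SwapList raw_swap_list;
  BestFullTsa().append_partial_solution(raw_swap_list, vertex_mapping, arch_mapping);
  // raw_swap_list now holds swaps, each along an architecture edge, realising the mapping.
}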
- */ - void append_partial_solution( - SwapList& swaps, VertexMapping& vertex_mapping, - const ArchitectureMapping& arch_mapping); + tsa_internal::DistancesInterface& distances, + tsa_internal::NeighboursInterface& neighbours, + tsa_internal::RiverFlowPathFinder& path_finder) override; private: - HybridTsa m_hybrid_tsa; - SwapListOptimiser m_swap_list_optimiser; - SwapListTableOptimiser m_table_optimiser; + tsa_internal::HybridTsa m_hybrid_tsa; + tsa_internal::SwapListOptimiser m_swap_list_optimiser; + tsa_internal::SwapListTableOptimiser m_table_optimiser; RNG m_rng; }; -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp index ac74509b68..f64db27266 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp @@ -21,7 +21,6 @@ #include "TokenSwapping/VectorListHybrid.hpp" namespace tket { -namespace tsa_internal { typedef std::pair Swap; typedef VectorListHybrid SwapList; @@ -44,5 +43,4 @@ Swap get_swap(size_t v1, size_t v2); */ bool disjoint(const Swap& swap1, const Swap& swap2); -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp index d043249950..4abf5fec1e 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -21,11 +21,10 @@ #include "VectorListHybridSkeleton.hpp" namespace tket { -namespace tsa_internal { struct OverwriteIntervalResult { size_t number_of_overwritten_elements; - VectorListHybridSkeleton::Index final_overwritten_element_id; + tsa_internal::VectorListHybridSkeleton::Index final_overwritten_element_id; }; /** VectorListHybrid combines some functionality of std::vector @@ -77,7 +76,7 @@ class VectorListHybrid { /** NOTE: the ID is NOT necessarily an actual vector index; * that's an implementation detail. */ - typedef VectorListHybridSkeleton::Index ID; + typedef tsa_internal::VectorListHybridSkeleton::Index ID; VectorListHybrid(); @@ -272,7 +271,7 @@ class VectorListHybrid { std::string debug_str() const; private: - VectorListHybridSkeleton m_links_data; + tsa_internal::VectorListHybridSkeleton m_links_data; /// The actual stored elements. 
std::vector m_data; @@ -303,13 +302,13 @@ VectorListHybrid::VectorListHybrid() {} template typename VectorListHybrid::ID VectorListHybrid::get_invalid_id() { - return VectorListHybridSkeleton::get_invalid_index(); + return tsa_internal::VectorListHybridSkeleton::get_invalid_index(); } template std::optional::ID> VectorListHybrid::optional_id(ID id) { - if (id == VectorListHybridSkeleton::get_invalid_index()) { + if (id == tsa_internal::VectorListHybridSkeleton::get_invalid_index()) { return {}; } return id; @@ -532,5 +531,4 @@ std::string VectorListHybrid::debug_str() const { return ss.str(); } -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp index de78dae7e1..1226c64acf 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp @@ -21,7 +21,6 @@ #include "SwapFunctions.hpp" namespace tket { -namespace tsa_internal { /// The desired result of swapping is to move a token on each "key" /// vertex to the "value" vertex. @@ -82,5 +81,4 @@ size_t get_source_vertex( */ void add_swap(VertexMapping& source_to_target_map, const Swap& swap); -} // namespace tsa_internal } // namespace tket From 7e08c8cf8a6afb9cf5df6be97fa6fd16b892c1bf Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Mon, 14 Feb 2022 20:18:36 +0000 Subject: [PATCH 079/146] more cleanup --- tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp | 4 +--- tket/src/Gate/GateUnitarySparseMatrix.cpp | 2 +- tket/src/Graphs/BruteForceColouring.cpp | 2 +- .../include/TokenSwapping/RiverFlowPathFinder.hpp | 2 +- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp index 7743473aaf..a6aaf317dd 100644 --- a/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp +++ b/tket/src/Gate/GateUnitaryMatrixVariableQubits.cpp @@ -70,10 +70,8 @@ Eigen::MatrixXcd GateUnitaryMatrixVariableQubits::get_dense_unitary( return GateUnitaryMatrixImplementations::NPhasedX( number_of_qubits, parameters[0], parameters[1]); default: - break; + TKET_ASSERT(false); } - TKET_ASSERT(false); - return Eigen::MatrixXcd(); } } // namespace internal diff --git a/tket/src/Gate/GateUnitarySparseMatrix.cpp b/tket/src/Gate/GateUnitarySparseMatrix.cpp index 1fa085db83..aac7fba756 100644 --- a/tket/src/Gate/GateUnitarySparseMatrix.cpp +++ b/tket/src/Gate/GateUnitarySparseMatrix.cpp @@ -159,7 +159,7 @@ std::vector GateUnitarySparseMatrix::get_unitary_triplets( ss << "Converting " << gate.get_name() << " to sparse unitary, via adding controls to gate type " << desc.name() << ": " << e.what(); - throw GateUnitaryMatrixError(ss.str(), e.cause); + TKET_ASSERT_WITH_MESSAGE(false, ss.str()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/BruteForceColouring.cpp b/tket/src/Graphs/BruteForceColouring.cpp index 1f5995bbe7..7b2d9744fd 100644 --- a/tket/src/Graphs/BruteForceColouring.cpp +++ b/tket/src/Graphs/BruteForceColouring.cpp @@ -222,7 +222,7 @@ BruteForceColouring::BruteForceColouring( << ", reached suggested_number_of_colours = " << suggested_number_of_colours << ", had " << number_of_nodes << " nodes. 
Error: " << e.what() << priority.print_raw_data(); - throw std::runtime_error(ss.str()); + TKET_ASSERT_WITH_MESSAGE(false, ss.str()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp index daaab1609e..6ab237726e 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp @@ -20,7 +20,7 @@ #include "DistancesInterface.hpp" #include "NeighboursInterface.hpp" -#include "RNG.hpp" +#include "Utils/RNG.hpp" namespace tket { namespace tsa_internal { From efeb70400ba3bafd5e0530be4006d1a4da1211f4 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 09:02:57 +0000 Subject: [PATCH 080/146] clang format --- tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp | 2 +- tket/tests/TokenSwapping/test_SwapListOptimiser.cpp | 2 +- tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp | 3 +-- tket/tests/TokenSwapping/test_VariousPartialTsa.cpp | 4 ++-- tket/tests/TokenSwapping/test_VectorListHybrid.cpp | 2 +- tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp | 2 +- 6 files changed, 7 insertions(+), 8 deletions(-) diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 3f11048db8..2f679434ba 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -19,8 +19,8 @@ #include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/DistancesFromArchitecture.hpp" #include "TokenSwapping/NeighboursFromArchitecture.hpp" -#include "Utils/RNG.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp index a68d2664f3..dc3cef50fc 100644 --- a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -18,8 +18,8 @@ #include #include "TestUtils/DebugFunctions.hpp" -#include "Utils/RNG.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp index 91d2480e46..15a1b89400 100644 --- a/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp +++ b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp @@ -15,8 +15,8 @@ #include #include -#include "Utils/RNG.hpp" #include "TokenSwapping/SwapsFromQubitMapping.hpp" +#include "Utils/RNG.hpp" using std::vector; @@ -117,6 +117,5 @@ SCENARIO("get_swaps : swaps returned directly from architecture") { REQUIRE(nodes_copy == node_final_positions); } - } // namespace tests } // namespace tket diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp index 92e222d527..25717d8240 100644 --- a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -15,13 +15,13 @@ #include #include "TestUtils/ArchitectureEdgesReimplementation.hpp" +#include "TestUtils/DebugFunctions.hpp" #include "TestUtils/PartialTsaTesting.hpp" #include "TestUtils/ProblemGeneration.hpp" #include "TokenSwapping/CyclesPartialTsa.hpp" -#include "TestUtils/DebugFunctions.hpp" -#include "Utils/RNG.hpp" #include 
"TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/TrivialTSA.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp index ca84d7e741..ea179510df 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp @@ -16,8 +16,8 @@ #include #include -#include "Utils/RNG.hpp" #include "TokenSwapping/VectorListHybrid.hpp" +#include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp index bf0d0de722..65baab5eb8 100644 --- a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp +++ b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp @@ -20,8 +20,8 @@ #include #include -#include "Utils/RNG.hpp" #include "TokenSwapping/VectorListHybridSkeleton.hpp" +#include "Utils/RNG.hpp" using std::vector; From c88e949c97d2cbe1afabd7f0e28ee2e1632222e1 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 09:11:01 +0000 Subject: [PATCH 081/146] added OpType dependency to token swapping --- tket/src/TokenSwapping/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt index 04d2898ca6..8ca4128346 100644 --- a/tket/src/TokenSwapping/CMakeLists.txt +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -55,6 +55,7 @@ add_library(tket-${COMP} list(APPEND DEPS_${COMP} Architecture Graphs + OpType Utils) foreach(DEP ${DEPS_${COMP}}) From ef57306ca34d3ff42682fb044cf6251d241766c7 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 09:31:08 +0000 Subject: [PATCH 082/146] clang format --- tket/src/TokenSwapping/HybridTsa.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/tket/src/TokenSwapping/HybridTsa.cpp b/tket/src/TokenSwapping/HybridTsa.cpp index b11b4b173e..304b606a78 100644 --- a/tket/src/TokenSwapping/HybridTsa.cpp +++ b/tket/src/TokenSwapping/HybridTsa.cpp @@ -27,7 +27,6 @@ HybridTsa::HybridTsa() { m_trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); } - void HybridTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, From 192171a810c6c219b629740599904174c2bd554d Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 10:00:56 +0000 Subject: [PATCH 083/146] replace throws with TKET_ASSERT_WITH_MESSAGE --- tket/src/Graphs/ColouringPriority.cpp | 8 +++++--- tket/src/Graphs/GraphColouring.cpp | 7 +++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/tket/src/Graphs/ColouringPriority.cpp b/tket/src/Graphs/ColouringPriority.cpp index 4dd9a1ab50..ede03a1ff3 100644 --- a/tket/src/Graphs/ColouringPriority.cpp +++ b/tket/src/Graphs/ColouringPriority.cpp @@ -18,6 +18,7 @@ #include #include "AdjacencyData.hpp" +#include "Utils/Assert.hpp" using std::map; using std::set; @@ -95,7 +96,7 @@ static void fill_initial_node_sequence( << adjacency_data.get_number_of_vertices() << " vertices)." << " So far, filled " << nodes.size() << " nodes." 
<< " Error: " << e.what(); - throw std::runtime_error(ss.str()); + TKET_ASSERT_WITH_MESSAGE(false, ss.str()); // GCOVR_EXCL_STOP } } @@ -121,8 +122,9 @@ const ColouringPriority::Nodes& ColouringPriority::get_nodes() const { return m_nodes; } +// GCOVR_EXCL_START +// currently used only within a tket assert macro string ColouringPriority::print_raw_data(bool relabel_to_simplify) const { - // GCOVR_EXCL_START map old_vertex_to_new_vertex; if (relabel_to_simplify) { for (size_t i = 0; i < m_nodes.size(); ++i) { @@ -174,8 +176,8 @@ string ColouringPriority::print_raw_data(bool relabel_to_simplify) const { } ss << "\n};\n\n"; return ss.str(); - // GCOVR_EXCL_STOP } +// GCOVR_EXCL_STOP ColouringPriority::ColouringPriority( const AdjacencyData& adjacency_data, diff --git a/tket/src/Graphs/GraphColouring.cpp b/tket/src/Graphs/GraphColouring.cpp index 828009dc40..0c551a6b67 100644 --- a/tket/src/Graphs/GraphColouring.cpp +++ b/tket/src/Graphs/GraphColouring.cpp @@ -98,7 +98,7 @@ static void colour_single_component( ss << "colouring single component " << component_index << " returned vertex " << vertex << " with colour " << colour << " : " << e.what(); - throw runtime_error(ss.str()); + TKET_ASSERT_WITH_MESSAGE(false, ss.str()); } // GCOVR_EXCL_STOP } @@ -172,11 +172,10 @@ GraphColouringResult GraphColouringRoutines::get_colouring( } catch (const exception& e) { // GCOVR_EXCL_START stringstream ss; - ss << "GraphColouringRoutines::get_colouring: we had " - << connected_components.size() << " connected components, " + ss << "We had " << connected_components.size() << " connected components, " << adjacency_data.get_number_of_vertices() << " vertices in total: " << e.what(); - throw runtime_error(ss.str()); + TKET_ASSERT_WITH_MESSAGE(false, ss.str()); // GCOVR_EXCL_STOP } } From 61a7b7e583cf6d0911763de7894df41f63c6ae0f Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 10:04:49 +0000 Subject: [PATCH 084/146] move BruteForceColouring.hpp, ColouringPriority.hpp out of include directory --- tket/src/Graphs/{include/Graphs => }/BruteForceColouring.hpp | 0 tket/src/Graphs/{include/Graphs => }/ColouringPriority.hpp | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tket/src/Graphs/{include/Graphs => }/BruteForceColouring.hpp (100%) rename tket/src/Graphs/{include/Graphs => }/ColouringPriority.hpp (100%) diff --git a/tket/src/Graphs/include/Graphs/BruteForceColouring.hpp b/tket/src/Graphs/BruteForceColouring.hpp similarity index 100% rename from tket/src/Graphs/include/Graphs/BruteForceColouring.hpp rename to tket/src/Graphs/BruteForceColouring.hpp diff --git a/tket/src/Graphs/include/Graphs/ColouringPriority.hpp b/tket/src/Graphs/ColouringPriority.hpp similarity index 100% rename from tket/src/Graphs/include/Graphs/ColouringPriority.hpp rename to tket/src/Graphs/ColouringPriority.hpp From fff97a9795654955de8ba4aca63d06f7eab65432 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 15:37:14 +0000 Subject: [PATCH 085/146] add TokenSwappingWithArch project; move files out of TokenSwapping --- recipes/tket/conanfile.py | 1 + tket/src/CMakeLists.txt | 3 +- tket/src/Mapping/CMakeLists.txt | 1 + tket/src/Mapping/MappingManager.cpp | 4 +- .../TokenSwapping/SwapsFromQubitMapping.hpp | 39 ----------- .../ArchitectureMapping.cpp | 0 .../BestTsaWithArch.cpp} | 25 +++++--- tket/src/TokenSwappingWithArch/CMakeLists.txt | 48 ++++++++++++++ .../DistancesFromArchitecture.cpp | 2 - .../NeighboursFromArchitecture.cpp | 2 - .../ArchitectureMapping.hpp | 0 
.../TokenSwappingWithArch/BestTsaWithArch.hpp | 64 +++++++++++++++++++ .../DistancesFromArchitecture.hpp | 6 +- .../NeighboursFromArchitecture.hpp | 4 +- 14 files changed, 138 insertions(+), 61 deletions(-) delete mode 100644 tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp rename tket/src/{TokenSwapping => TokenSwappingWithArch}/ArchitectureMapping.cpp (100%) rename tket/src/{TokenSwapping/SwapsFromQubitMapping.cpp => TokenSwappingWithArch/BestTsaWithArch.cpp} (75%) create mode 100644 tket/src/TokenSwappingWithArch/CMakeLists.txt rename tket/src/{TokenSwapping => TokenSwappingWithArch}/DistancesFromArchitecture.cpp (98%) rename tket/src/{TokenSwapping => TokenSwappingWithArch}/NeighboursFromArchitecture.cpp (97%) rename tket/src/{TokenSwapping/include/TokenSwapping => TokenSwappingWithArch/include/TokenSwappingWithArch}/ArchitectureMapping.hpp (100%) create mode 100644 tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/BestTsaWithArch.hpp rename tket/src/{TokenSwapping/include/TokenSwapping => TokenSwappingWithArch/include/TokenSwappingWithArch}/DistancesFromArchitecture.hpp (96%) rename tket/src/{TokenSwapping/include/TokenSwapping => TokenSwappingWithArch/include/TokenSwappingWithArch}/NeighboursFromArchitecture.hpp (95%) diff --git a/recipes/tket/conanfile.py b/recipes/tket/conanfile.py index c376ed25b1..24bcbfa657 100644 --- a/recipes/tket/conanfile.py +++ b/recipes/tket/conanfile.py @@ -64,6 +64,7 @@ class TketConan(ConanFile): "Characterisation", "Converters", "TokenSwapping", + "TokenSwappingWithArch", "Mapping", "Placement", "MeasurementSetup", diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index f6924b79a1..f6d5b4b7cb 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -61,6 +61,7 @@ ENDIF() # this list corresponds to a topological sorting of the dependency graph of the different modules list(APPEND TKET_COMPS Utils + TokenSwapping ZX OpType Clifford @@ -70,6 +71,7 @@ list(APPEND TKET_COMPS PauliGraph Circuit Architecture + TokenSwappingWithArch Simulation Diagonalisation Program @@ -77,7 +79,6 @@ list(APPEND TKET_COMPS Converters Placement ArchAwareSynth - TokenSwapping Mapping MeasurementSetup Transformations diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index b7c6c74769..f885520e64 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -36,6 +36,7 @@ list(APPEND DEPS_${COMP} Ops OpType TokenSwapping + TokenSwappingWithArch Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index de87f24ba2..48dda61172 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -14,7 +14,7 @@ #include "Mapping/MappingManager.hpp" -#include "TokenSwapping/SwapsFromQubitMapping.hpp" +#include "TokenSwappingWithArch/BestTsaWithArch.hpp" namespace tket { @@ -88,7 +88,7 @@ bool MappingManager::route_circuit_with_maps( node_map.insert({Node(x.first), Node(x.second)}); } for (const std::pair& swap : - get_swaps(*this->architecture_, node_map)) { + BestTsaWithArch::get_swaps(*this->architecture_, node_map)) { mapping_frontier->add_swap(swap.first, swap.second); } } diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp deleted file mode 100644 index b7a70cb286..0000000000 --- a/tket/src/TokenSwapping/include/TokenSwapping/SwapsFromQubitMapping.hpp +++ /dev/null @@ -1,39 
+0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include - -#include "Architecture/Architecture.hpp" - -namespace tket { - -/** This specifies desired source->target vertex mappings. - * Any nodes not occurring as a key might be moved by the algorithm. - */ -typedef std::map NodeMapping; - -/** Version 1.1, not too bad. - * @param architecture The raw object containing the graph. - * @param node_mapping The desired source->target node mapping. - * @return The required list of node pairs to swap. - */ -std::vector> get_swaps( - const Architecture& architecture, const NodeMapping& node_mapping); - -} // namespace tket diff --git a/tket/src/TokenSwapping/ArchitectureMapping.cpp b/tket/src/TokenSwappingWithArch/ArchitectureMapping.cpp similarity index 100% rename from tket/src/TokenSwapping/ArchitectureMapping.cpp rename to tket/src/TokenSwappingWithArch/ArchitectureMapping.cpp diff --git a/tket/src/TokenSwapping/SwapsFromQubitMapping.cpp b/tket/src/TokenSwappingWithArch/BestTsaWithArch.cpp similarity index 75% rename from tket/src/TokenSwapping/SwapsFromQubitMapping.cpp rename to tket/src/TokenSwappingWithArch/BestTsaWithArch.cpp index aa7ac3bd80..9e2e28fb0b 100644 --- a/tket/src/TokenSwapping/SwapsFromQubitMapping.cpp +++ b/tket/src/TokenSwappingWithArch/BestTsaWithArch.cpp @@ -12,20 +12,30 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "TokenSwapping/SwapsFromQubitMapping.hpp" - -#include -#include +#include "BestTsaWithArch.hpp" +#include "DistancesFromArchitecture.hpp" +#include "NeighboursFromArchitecture.hpp" #include "TokenSwapping/BestFullTsa.hpp" -#include "TokenSwapping/VertexMappingFunctions.hpp" #include "Utils/Assert.hpp" +#include "Utils/RNG.hpp" namespace tket { using namespace tsa_internal; -std::vector> get_swaps( +void BestTsaWithArch::append_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + const ArchitectureMapping& arch_mapping) { + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RNG rng; + RiverFlowPathFinder path_finder(distances, neighbours, rng); + BestFullTsa().append_partial_solution( + swaps, vertex_mapping, distances, neighbours, path_finder); +} + +std::vector> BestTsaWithArch::get_swaps( const Architecture& architecture, const NodeMapping& node_mapping) { std::vector> swaps; // Before all the conversion and object construction, @@ -51,8 +61,7 @@ std::vector> get_swaps( check_mapping(vertex_mapping); SwapList raw_swap_list; - BestFullTsa().append_partial_solution( - raw_swap_list, vertex_mapping, arch_mapping); + BestTsaWithArch::append_solution(raw_swap_list, vertex_mapping, arch_mapping); // Finally, convert the raw swaps back to nodes. 
swaps.reserve(raw_swap_list.size()); diff --git a/tket/src/TokenSwappingWithArch/CMakeLists.txt b/tket/src/TokenSwappingWithArch/CMakeLists.txt new file mode 100644 index 0000000000..b975711762 --- /dev/null +++ b/tket/src/TokenSwappingWithArch/CMakeLists.txt @@ -0,0 +1,48 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +project(tket-${COMP}) + +if (NOT ${COMP} STREQUAL "TokenSwappingWithArch") + message(FATAL_ERROR "Unexpected component name.") +endif() + +add_library(tket-${COMP} + ArchitectureMapping.cpp + BestTsaWithArch.cpp + DistancesFromArchitecture.cpp + NeighboursFromArchitecture.cpp + ) + +list(APPEND DEPS_${COMP} + Architecture + Graphs + OpType + TokenSwapping + Utils) + +foreach(DEP ${DEPS_${COMP}}) + target_include_directories( + tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) + target_link_libraries( + tket-${COMP} PRIVATE tket-${DEP}) +endforeach() + +target_include_directories(tket-${COMP} + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${TKET_${COMP}_INCLUDE_DIR} + ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) + +target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS}) diff --git a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp b/tket/src/TokenSwappingWithArch/DistancesFromArchitecture.cpp similarity index 98% rename from tket/src/TokenSwapping/DistancesFromArchitecture.cpp rename to tket/src/TokenSwappingWithArch/DistancesFromArchitecture.cpp index 3ae327ceda..4c277fb118 100644 --- a/tket/src/TokenSwapping/DistancesFromArchitecture.cpp +++ b/tket/src/TokenSwappingWithArch/DistancesFromArchitecture.cpp @@ -18,7 +18,6 @@ #include namespace tket { -namespace tsa_internal { DistancesFromArchitecture::DistancesFromArchitecture( const ArchitectureMapping& arch_mapping) @@ -88,5 +87,4 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { return distance_entry; } -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp b/tket/src/TokenSwappingWithArch/NeighboursFromArchitecture.cpp similarity index 97% rename from tket/src/TokenSwapping/NeighboursFromArchitecture.cpp rename to tket/src/TokenSwappingWithArch/NeighboursFromArchitecture.cpp index 57bc21724a..bab81a70d2 100644 --- a/tket/src/TokenSwapping/NeighboursFromArchitecture.cpp +++ b/tket/src/TokenSwappingWithArch/NeighboursFromArchitecture.cpp @@ -19,7 +19,6 @@ #include namespace tket { -namespace tsa_internal { NeighboursFromArchitecture::NeighboursFromArchitecture( const ArchitectureMapping& arch_mapping) @@ -66,5 +65,4 @@ const std::vector& NeighboursFromArchitecture::operator()( return neighbours; } -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp b/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/ArchitectureMapping.hpp similarity index 100% rename from tket/src/TokenSwapping/include/TokenSwapping/ArchitectureMapping.hpp rename to 
tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/ArchitectureMapping.hpp
diff --git a/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/BestTsaWithArch.hpp b/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/BestTsaWithArch.hpp
new file mode 100644
index 0000000000..3df8c50ac7
--- /dev/null
+++ b/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/BestTsaWithArch.hpp
@@ -0,0 +1,64 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "ArchitectureMapping.hpp"
+#include "TokenSwapping/VertexMappingFunctions.hpp"
+
+namespace tket {
+
+/** A simple wrapper around BestFullTsa from TokenSwapping,
+ * using Architecture objects directly to find distances and neighbours.
+ */
+struct BestTsaWithArch {
+  /** Given the desired vertex mapping, a list
+   * of swaps (which may or may not be empty), and information about
+   * the architecture (the underlying graph), append extra swaps to it
+   * to produce the desired mapping.
+   * @param swaps The list of swaps to append to.
+   * @param vertex_mapping The current desired mapping. Will be updated with
+   * the new added swaps.
+   * @param arch_mapping An ArchitectureMapping object, which knows the graph,
+   * and how to do Node <-> vertex size_t conversions.
+   */
+  static void append_solution(
+      SwapList& swaps, VertexMapping& vertex_mapping,
+      const ArchitectureMapping& arch_mapping);
+
+  /** This specifies desired source->target vertex mappings.
+   * Any nodes not occurring as a key might be moved by the algorithm.
+   */
+  typedef std::map NodeMapping;
+
+  /** Given an architecture and desired source->target node mapping,
+   * compute a sequence of swaps (attempts to be as short as possible)
+   * which will perform that mapping.
+   * Note that it may use ALL the nodes in the architecture,
+   * not just the ones occurring in the node_mapping.
+   * If you wish certain nodes to be fixed, specify them in the mapping
+   * (with equal source and target).
+   * (However, note that they might STILL be moved, as long as by the end
+   * they are back at the start. If you really don't want to involve a
+   * particular node, you must remove it completely from the architecture).
+   * KNOWN BUG: it may give an error with disconnected architectures.
+   * @param architecture The raw object containing the graph.
+   * @param node_mapping The desired source->target node mapping.
+   * @return The required list of node pairs to swap.
+ */ + static std::vector> get_swaps( + const Architecture& architecture, const NodeMapping& node_mapping); +}; + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp b/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/DistancesFromArchitecture.hpp similarity index 96% rename from tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp rename to tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/DistancesFromArchitecture.hpp index 7f2dc0834d..8f9f8ac2b0 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistancesFromArchitecture.hpp +++ b/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/DistancesFromArchitecture.hpp @@ -15,11 +15,10 @@ #pragma once #include "ArchitectureMapping.hpp" -#include "DistancesInterface.hpp" -#include "SwapFunctions.hpp" +#include "TokenSwapping/DistancesInterface.hpp" +#include "TokenSwapping/SwapFunctions.hpp" namespace tket { -namespace tsa_internal { /** Directly get distances from an architecture object, * but evaluated lazily. @@ -84,5 +83,4 @@ class DistancesFromArchitecture : public DistancesInterface { const std::vector& path, size_t begin, size_t end); }; -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp b/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/NeighboursFromArchitecture.hpp similarity index 95% rename from tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp rename to tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/NeighboursFromArchitecture.hpp index 5a32f3cb5d..e4a833b591 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursFromArchitecture.hpp +++ b/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/NeighboursFromArchitecture.hpp @@ -15,10 +15,9 @@ #pragma once #include "ArchitectureMapping.hpp" -#include "NeighboursInterface.hpp" +#include "TokenSwapping/NeighboursInterface.hpp" namespace tket { -namespace tsa_internal { /** Stores and returns upon request the adjacent vertices to a given vertex * on a graph, using an underlying Architecture object. 
@@ -47,5 +46,4 @@ class NeighboursFromArchitecture : public NeighboursInterface { std::map> m_cached_neighbours; }; -} // namespace tsa_internal } // namespace tket From 96a8aa6159212241f81018ff791f0b8bec7b1198 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 15:38:41 +0000 Subject: [PATCH 086/146] remove architecture from TokenSwapping --- tket/src/TokenSwapping/BestFullTsa.cpp | 13 ------------ tket/src/TokenSwapping/CMakeLists.txt | 7 ------- .../include/TokenSwapping/BestFullTsa.hpp | 20 +------------------ 3 files changed, 1 insertion(+), 39 deletions(-) diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp index f0f81ddbae..89966b8c98 100644 --- a/tket/src/TokenSwapping/BestFullTsa.cpp +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -14,8 +14,6 @@ #include "TokenSwapping/BestFullTsa.hpp" -#include "TokenSwapping/DistancesFromArchitecture.hpp" -#include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/VertexMapResizing.hpp" @@ -25,17 +23,6 @@ using namespace tsa_internal; BestFullTsa::BestFullTsa() { m_name = "BestFullTsa"; } -void BestFullTsa::append_partial_solution( - SwapList& swaps, VertexMapping& vertex_mapping, - const ArchitectureMapping& arch_mapping) { - DistancesFromArchitecture distances(arch_mapping); - NeighboursFromArchitecture neighbours(arch_mapping); - RiverFlowPathFinder path_finder(distances, neighbours, m_rng); - m_rng.set_seed(); - append_partial_solution( - swaps, vertex_mapping, distances, neighbours, path_finder); -} - void BestFullTsa::append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, DistancesInterface& distances, NeighboursInterface& neighbours, diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt index 8ca4128346..3342fd69c8 100644 --- a/tket/src/TokenSwapping/CMakeLists.txt +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -19,22 +19,18 @@ if (NOT ${COMP} STREQUAL "TokenSwapping") endif() add_library(tket-${COMP} - ArchitectureMapping.cpp BestFullTsa.cpp CyclesCandidateManager.cpp CyclesGrowthManager.cpp CyclesPartialTsa.cpp CyclicShiftCostEstimate.cpp - DistancesFromArchitecture.cpp DistancesInterface.cpp DynamicTokenTracker.cpp HybridTsa.cpp - NeighboursFromArchitecture.cpp NeighboursInterface.cpp PartialTsaInterface.cpp RiverFlowPathFinder.cpp SwapListOptimiser.cpp - SwapsFromQubitMapping.cpp TrivialTSA.cpp VectorListHybridSkeleton.cpp TSAUtils/DistanceFunctions.cpp @@ -53,9 +49,6 @@ add_library(tket-${COMP} ) list(APPEND DEPS_${COMP} - Architecture - Graphs - OpType Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp index 14cd96d597..57fbe48834 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp @@ -14,11 +14,9 @@ #pragma once -#include "ArchitectureMapping.hpp" #include "HybridTsa.hpp" #include "SwapListOptimiser.hpp" #include "SwapListTableOptimiser.hpp" -#include "Utils/RNG.hpp" namespace tket { @@ -31,20 +29,6 @@ class BestFullTsa : public tsa_internal::PartialTsaInterface { public: BestFullTsa(); - /** The main entry function. Given the desired vertex mapping, a list - * of swaps (which may or may not be empty), and information about - * the architecture (the underlying graph), append extra swaps to it - * to produce the desired mapping. 
- * @param swaps The list of swaps to append to. - * @param vertex_mapping The current desired mapping. Will be updated with - * the new added swaps. - * @param arch_mapping An ArchitectureMapping object, which knows the graph, - * and how to do Node <-> vertex size_t conversions. - */ - void append_partial_solution( - SwapList& swaps, VertexMapping& vertex_mapping, - const ArchitectureMapping& arch_mapping); - /** We emphasise that, unlike the general PartialTsaInterface, the solution * returned is complete, AND includes all known swap list optimisations. * Warning: unlike most PartialTsaInterface objects, the vertex_mapping @@ -61,15 +45,13 @@ class BestFullTsa : public tsa_internal::PartialTsaInterface { */ virtual void append_partial_solution( SwapList& swaps, VertexMapping& vertex_mapping, - tsa_internal::DistancesInterface& distances, - tsa_internal::NeighboursInterface& neighbours, + DistancesInterface& distances, NeighboursInterface& neighbours, tsa_internal::RiverFlowPathFinder& path_finder) override; private: tsa_internal::HybridTsa m_hybrid_tsa; tsa_internal::SwapListOptimiser m_swap_list_optimiser; tsa_internal::SwapListTableOptimiser m_table_optimiser; - RNG m_rng; }; } // namespace tket From 890180b7f5ff7222f0bbd6aaa9152d8b9170d5da Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 15:39:50 +0000 Subject: [PATCH 087/146] move DistancesInterface, NeighboursInterface out of namespace tsa_internal --- tket/src/TokenSwapping/DistancesInterface.cpp | 2 -- tket/src/TokenSwapping/NeighboursInterface.cpp | 2 -- .../TokenSwapping/include/TokenSwapping/DistancesInterface.hpp | 2 -- .../TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp | 2 -- 4 files changed, 8 deletions(-) diff --git a/tket/src/TokenSwapping/DistancesInterface.cpp b/tket/src/TokenSwapping/DistancesInterface.cpp index a55d6b1f3f..2ad47c308b 100644 --- a/tket/src/TokenSwapping/DistancesInterface.cpp +++ b/tket/src/TokenSwapping/DistancesInterface.cpp @@ -17,7 +17,6 @@ using std::vector; namespace tket { -namespace tsa_internal { void DistancesInterface::register_shortest_path( const vector& /*path*/) {} @@ -34,5 +33,4 @@ void DistancesInterface::register_edge(size_t /*vertex1*/, size_t /*vertex2*/) { DistancesInterface::~DistancesInterface() {} -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/NeighboursInterface.cpp b/tket/src/TokenSwapping/NeighboursInterface.cpp index 805ffa02f7..33fc794b9b 100644 --- a/tket/src/TokenSwapping/NeighboursInterface.cpp +++ b/tket/src/TokenSwapping/NeighboursInterface.cpp @@ -17,7 +17,6 @@ #include "Utils/Exceptions.hpp" namespace tket { -namespace tsa_internal { const std::vector& NeighboursInterface::operator()(size_t) { throw NotImplemented("NeighboursInterface::get_neighbours: not implemented"); @@ -25,5 +24,4 @@ const std::vector& NeighboursInterface::operator()(size_t) { NeighboursInterface::~NeighboursInterface() {} -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp index 8f7f1a1063..44731f3690 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp @@ -18,7 +18,6 @@ #include namespace tket { -namespace tsa_internal { /** What is the distance between any two vertices on a graph? 
* To save time and cope with larger, sparse graphs, it may @@ -64,5 +63,4 @@ class DistancesInterface { virtual ~DistancesInterface(); }; -} // namespace tsa_internal } // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp index fe7be4387d..66bfb96afe 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp @@ -18,7 +18,6 @@ #include namespace tket { -namespace tsa_internal { /** What are the adjacent vertices to a given vertex on a graph? * For larger, sparse graphs, it might @@ -42,5 +41,4 @@ class NeighboursInterface { virtual ~NeighboursInterface(); }; -} // namespace tsa_internal } // namespace tket From b460561f36a07b6ae5be31f2288200978728eb09 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 15:44:48 +0000 Subject: [PATCH 088/146] update TokenSwapping tests to use TokenSwappingWithArch --- tket/tests/CMakeLists.txt | 1 + tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp | 7 +++---- tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp | 8 +------- tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp | 6 +++--- tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp | 4 ++-- tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp | 2 +- .../TokenSwapping/test_ArchitectureMappingEndToEnd.cpp | 6 +++--- .../TokenSwapping/test_DistancesFromArchitecture.cpp | 2 +- tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp | 6 +++--- tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp | 6 +++--- 11 files changed, 22 insertions(+), 28 deletions(-) diff --git a/tket/tests/CMakeLists.txt b/tket/tests/CMakeLists.txt index 4626c93a97..eaad06f9a2 100644 --- a/tket/tests/CMakeLists.txt +++ b/tket/tests/CMakeLists.txt @@ -61,6 +61,7 @@ target_link_libraries(test_tket PRIVATE tket-Program tket-Placement tket-TokenSwapping + tket-TokenSwappingWithArch tket-Mapping tket-Simulation tket-Transformations diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp index 23727c7107..89d8283325 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -16,9 +16,9 @@ #include -#include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/VertexMappingFunctions.hpp" #include "TokenSwapping/VertexSwapResult.hpp" +#include "TokenSwappingWithArch/BestTsaWithArch.hpp" using std::vector; @@ -80,8 +80,6 @@ struct VertexRelabellingManager { }; } // namespace -BestFullTsa& BestTsaTester::get_best_full_tsa() { return m_best_full_tsa; } - size_t BestTsaTester::get_checked_solution_size( const DecodedProblemData& problem_data) { m_architecture_work_data.edges.clear(); @@ -133,7 +131,8 @@ size_t BestTsaTester::get_checked_solution_size( m_vertex_mapping_copy = relabelling_manager.get_internal_mapping_for_tsa_input( problem_data.vertex_mapping); - m_best_full_tsa.append_partial_solution( + + BestTsaWithArch::append_solution( m_raw_swap_list, m_vertex_mapping_copy, arch_mapping); // Now check the calculated solution. 
diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp index 6afa674f0b..4d510889ea 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp @@ -15,7 +15,7 @@ #pragma once #include "DecodedProblemData.hpp" -#include "TokenSwapping/BestFullTsa.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" namespace tket { namespace tsa_internal { @@ -48,13 +48,7 @@ class BestTsaTester { const DecodedProblemData& problem_data, const DecodedArchitectureData& architecture_data); - /** For convenience in testing/experiments, allow access to the TSA, - * to change parameters etc. etc. from their defaults. - */ - BestFullTsa& get_best_full_tsa(); - private: - BestFullTsa m_best_full_tsa; SwapList m_raw_swap_list; DecodedArchitectureData m_architecture_work_data; std::vector> m_edges_vect; diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp index 44f6b56c40..9dc6aeb160 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -17,12 +17,12 @@ #include #include "DebugFunctions.hpp" -#include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/DistanceFunctions.hpp" -#include "TokenSwapping/DistancesFromArchitecture.hpp" -#include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/VertexSwapResult.hpp" +#include "TokenSwappingWithArch/ArchitectureMapping.hpp" +#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" +#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index d85543a0aa..dc115df395 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -14,9 +14,9 @@ #pragma once -#include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" +#include "TokenSwappingWithArch/ArchitectureMapping.hpp" #include "Utils/RNG.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp index 524de4484b..0637d577fe 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -18,10 +18,10 @@ #include "TestStatsStructs.hpp" #include "TokenSwapping/DistanceFunctions.hpp" -#include "TokenSwapping/DistancesFromArchitecture.hpp" -#include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/VertexSwapResult.hpp" +#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" +#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index b494afe52d..a3de1aa99a 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -14,8 +14,8 @@ #pragma once -#include "TokenSwapping/ArchitectureMapping.hpp" #include "TokenSwapping/PartialTsaInterface.hpp" +#include "TokenSwappingWithArch/ArchitectureMapping.hpp" 
#include "Utils/RNG.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp index 431875618c..3d46f7c455 100644 --- a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp +++ b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -15,9 +15,9 @@ #include #include -#include "TokenSwapping/ArchitectureMapping.hpp" -#include "TokenSwapping/DistancesFromArchitecture.hpp" -#include "TokenSwapping/NeighboursFromArchitecture.hpp" +#include "TokenSwappingWithArch/ArchitectureMapping.hpp" +#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" +#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp index cf8d06d9f7..692ad6e296 100644 --- a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp +++ b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp @@ -16,7 +16,7 @@ #include #include -#include "TokenSwapping/DistancesFromArchitecture.hpp" +#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" using Catch::Matchers::Contains; using std::vector; diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 2f679434ba..4f932da85e 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -16,10 +16,10 @@ #include #include "TestUtils/ArchitectureEdgesReimplementation.hpp" -#include "TokenSwapping/ArchitectureMapping.hpp" -#include "TokenSwapping/DistancesFromArchitecture.hpp" -#include "TokenSwapping/NeighboursFromArchitecture.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwappingWithArch/ArchitectureMapping.hpp" +#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" +#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" #include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp index 15a1b89400..cc990dba55 100644 --- a/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp +++ b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp @@ -15,7 +15,7 @@ #include #include -#include "TokenSwapping/SwapsFromQubitMapping.hpp" +#include "TokenSwappingWithArch/BestTsaWithArch.hpp" #include "Utils/RNG.hpp" using std::vector; @@ -54,7 +54,7 @@ SCENARIO("get_swaps : swaps returned directly from architecture") { const auto node_final_positions = nodes_copy; problem_ss << " Node mapping:"; - NodeMapping node_mapping; + BestTsaWithArch::NodeMapping node_mapping; for (size_t ii = 0; ii < nodes.size(); ++ii) { problem_ss << "\ni=" << ii << " : " << node_final_positions[ii].repr() << " -> " << nodes[ii].repr(); @@ -89,7 +89,7 @@ SCENARIO("get_swaps : swaps returned directly from architecture") { "i=23 : gridNode[2, 3, 1] -> gridNode[2, 3, 1]"); // Calculate swaps to enact the permutation. - const auto node_swaps = get_swaps(arch, node_mapping); + const auto node_swaps = BestTsaWithArch::get_swaps(arch, node_mapping); // This will hopefully decrease over time // as we improve the algorithm. 
From 97b1efa3924784462fba732dafabfc32fe5ac832 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 15 Feb 2022 16:45:34 +0000 Subject: [PATCH 089/146] bool type for delay_measures --- tket/src/Predicates/include/Predicates/PassGenerators.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tket/src/Predicates/include/Predicates/PassGenerators.hpp b/tket/src/Predicates/include/Predicates/PassGenerators.hpp index 77055ecaab..b036147368 100644 --- a/tket/src/Predicates/include/Predicates/PassGenerators.hpp +++ b/tket/src/Predicates/include/Predicates/PassGenerators.hpp @@ -51,7 +51,7 @@ std::vector object */ PassPtr gen_full_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, const std::vector& config); -PassPtr gen_default_mapping_pass(const Architecture& arc, delay_measures = true); +PassPtr gen_default_mapping_pass(const Architecture& arc, bool delay_measures = true); PassPtr gen_cx_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, const std::vector& config, bool directed_cx, From f7878df71e6115a692b5babc75de55dd109ccae1 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Tue, 15 Feb 2022 17:10:32 +0000 Subject: [PATCH 090/146] Create architecture_test.py --- pytket/tests/architecture_test.py | 87 +++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 pytket/tests/architecture_test.py diff --git a/pytket/tests/architecture_test.py b/pytket/tests/architecture_test.py new file mode 100644 index 0000000000..a73e9c2c4b --- /dev/null +++ b/pytket/tests/architecture_test.py @@ -0,0 +1,87 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytket.circuit import Node +from pytket.architecture import NodeGraph, Architecture, SquareGrid, FullyConnected + + +def test_architectures() -> None: + basic_index_coupling = [(0, 1), (2, 1), (2, 3), (4, 3)] + basic_index_architecture = Architecture(basic_index_coupling) + basic_index_coupling_convert = [ + (Node(0), Node(1)), + (Node(2), Node(1)), + (Node(2), Node(3)), + (Node(4), Node(3)), + ] + assert basic_index_architecture.coupling == basic_index_coupling_convert + + node_0 = Node("example_register", 0) + node_1 = Node("example_register", 1) + node_2 = Node("example_register", 2) + node_3 = Node("example_register", 3) + basic_uid_coupling = [(node_0, node_1), (node_1, node_2), (node_2, node_3)] + basic_uid_architecture = Architecture(basic_uid_coupling) + assert basic_uid_architecture.coupling == basic_uid_coupling + + square_arc = SquareGrid(2, 2, 2) + assert square_arc.nodes[0] == Node("gridNode", [0, 0, 0]) + assert square_arc.coupling[0] == ( + Node("gridNode", [0, 0, 0]), + Node("gridNode", [0, 1, 0]), + ) + + +def test_architecture_eq() -> None: + coupling = [(1, 2), (3, 4), (0, 6), (0, 3)] + arc = Architecture(coupling) + + assert arc != Architecture([]) + assert arc == Architecture(coupling) + assert arc == Architecture([(Node(i), Node(j)) for (i, j) in coupling]) + assert arc != Architecture([(Node("s", i), Node("s", j)) for (i, j) in coupling]) + + # only Node IDs and coupling matters + g00, g01, g10, g11 = [ + Node("gridNode", [i, j, 0]) for i in range(2) for j in range(2) + ] + sq_arc = Architecture([(g00, g01), (g01, g11), (g00, g10), (g10, g11)]) + assert sq_arc == SquareGrid(2, 2) + assert sq_arc != Architecture([(g00, g01), (g01, g11), (g00, g10)]) + + +def test_fully_connected() -> None: + fc = FullyConnected(3) + assert fc.nodes == [Node("fcNode", i) for i in range(3)] + d = fc.to_dict() + fc1 = FullyConnected.from_dict(d) + assert fc == fc1 + + +def test_arch_types() -> None: + arch = Architecture([(0, 1)]) + assert isinstance(arch, Architecture) + assert isinstance(arch, NodeGraph) + fc = FullyConnected(2) + assert isinstance(fc, FullyConnected) + assert isinstance(fc, NodeGraph) + sg = SquareGrid(2, 2, 2) + assert isinstance(sg, SquareGrid) + assert isinstance(sg, NodeGraph) + +if __name__ == "__main__": + test_architectures() + test_architecture_eq() + test_fully_connected() + test_arch_types() \ No newline at end of file From 3a1ce49200906bfee2f33e5a77f98694a6a5d382 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 17:55:52 +0000 Subject: [PATCH 091/146] Reduce test times: add TSGlobalTestParameters with run_long_tests option --- .../test_SwapSequenceReductions.cpp | 59 +++++++++- .../TestUtils/TSGlobalTestParameters.hpp | 37 ++++++ .../test_BestTsaFixedSwapSequences.cpp | 111 ++++++++++++++---- tket/tests/tkettestsfiles.cmake | 2 + 4 files changed, 182 insertions(+), 27 deletions(-) create mode 100644 tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp index 11242ea2fc..d8a8cb5a59 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp @@ -17,13 +17,11 @@ #include "../Data/FixedCompleteSolutions.hpp" #include "../Data/FixedSwapSequences.hpp" +#include "../TestUtils/TSGlobalTestParameters.hpp" #include "SwapSequenceReductionTester.hpp" using std::vector; -// NOTE: running all 
tests in this file currently takes ~19 seconds -// on an ordinary Windows laptop. - namespace tket { namespace tsa_internal { namespace tests { @@ -70,15 +68,43 @@ SCENARIO("Fixed swap sequences reduction") { "[658 equal probs (12376); 238 reduced probs (12962 vs 13463)]\n" "[Overall reduction 25338 vs 25839: 1%]"}; + unsigned skip_number = 1; + if (!TSGlobalTestParameters().run_long_tests) { + // The long tests take ~5 seconds on a 2021 Windows laptop; + // the shorter tests take ~0.4 seconds. + skip_number = 20; + expected_messages = vector{ + + "[n=0, Full tokens: init segm optim? true]\n" + "[25 equal probs (846); 0 reduced probs (0 vs 0)]\n" + "[Overall reduction 846 vs 846: 0%]", + + "[n=1, Partial tokens: init segm optim? true]\n" + "[46 equal probs (1348); 0 reduced probs (0 vs 0)]\n" + "[Overall reduction 1348 vs 1348: 0%]", + + "[n=2, Full tokens: init segm optim? false]\n" + "[24 equal probs (822); 1 reduced probs (22 vs 24)]\n" + "[Overall reduction 844 vs 846: 0%]", + + "[n=3, Partial tokens: init segm optim? false]\n" + "[34 equal probs (461); 12 reduced probs (844 vs 887)]\n" + "[Overall reduction 1305 vs 1348: 3%]"}; + } + const FixedSwapSequences fixed_sequences; SwapSequenceReductionTester tester; SwapSequenceReductionTester::Options options; vector calc_messages; - const auto add_solutions = [&tester, &options]( + const auto add_solutions = [&tester, &options, skip_number]( const vector& seq_codes, SequenceReductionStats& stats) { - for (const auto& code_str : seq_codes) { + for (unsigned ii = 0; ii < seq_codes.size(); ++ii) { + if (ii % skip_number != 0) { + continue; + } + const auto& code_str = seq_codes[ii]; const DecodedProblemData problem_data(code_str); const auto reduced_size = tester.get_checked_solution_size(problem_data, options); @@ -121,6 +147,26 @@ SCENARIO("Fixed complete problems") { "[164 equal probs (12771); 408 reduced probs (43946 vs 45894)]\n" "[Overall reduction 56717 vs 58665: 3%]"}; + unsigned skip_number = 1; + + if (!TSGlobalTestParameters().run_long_tests) { + // The long tests take ~10 seconds on a 2021 Windows laptop; + // the shorter tests take ~0.4 seconds. + skip_number = 20; + expected_messages = vector{ + "[n=0, Small: init segm optim? false]\n" + "[8 equal probs (48); 1 reduced probs (9 vs 10)]\n" + "[Overall reduction 57 vs 58: 1%]", + + "[n=1, Medium: init segm optim? false]\n" + "[8 equal probs (138); 1 reduced probs (23 vs 24)]\n" + "[Overall reduction 161 vs 162: 0%]", + + "[n=2, Large: init segm optim? false]\n" + "[10 equal probs (928); 16 reduced probs (1657 vs 1743)]\n" + "[Overall reduction 2585 vs 2671: 3%]"}; + } + SwapSequenceReductionTester::Options options; options.optimise_initial_segment_only = false; @@ -134,6 +180,9 @@ SCENARIO("Fixed complete problems") { // First element encodes the edges. 
const DecodedArchitectureData arch_data(problem_entry.second[0]); for (unsigned ii = 1; ii < problem_entry.second.size(); ++ii) { + if (ii % skip_number != 0) { + continue; + } const auto& problem_str = problem_entry.second[ii]; const DecodedProblemData problem_data( problem_str, DecodedProblemData::RequireContiguousVertices::NO); diff --git a/tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp b/tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp new file mode 100644 index 0000000000..9439b43170 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp @@ -0,0 +1,37 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** If we want to use the same adjustable parameters across all + * TokenSwapping tests simultaneously, put them here. + */ +struct TSGlobalTestParameters { + /** Running all the token swapping tests can take ~30 seconds + * on an ordinary laptop. Set this to false in order to test + * a smaller set. + */ + bool run_long_tests; + + // TSGlobalTestParameters() : run_long_tests(true) {} + TSGlobalTestParameters() : run_long_tests(false) {} +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp index 6fedd4845d..be88c00b8e 100644 --- a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp @@ -17,11 +17,8 @@ #include "Data/FixedCompleteSolutions.hpp" #include "Data/FixedSwapSequences.hpp" #include "TestUtils/BestTsaTester.hpp" +#include "TestUtils/TSGlobalTestParameters.hpp" -// NOTE: currently, the tests in this file (solving ~2300 complete problems -// with the BestTSA, which includes full table lookup) -// take ~25 seconds on an ordinary Windows laptop. -// /// TODO: The swap table optimiser currently tries to optimise many segments; /// certainly it could be cut down, experimentation is needed /// to find how much to cut it down, without degrading solution @@ -154,28 +151,56 @@ struct Summary { } // namespace SCENARIO("Best TSA : solve problems from fixed swap sequences") { - const FixedSwapSequences sequences; - BestTsaTester tester; + FixedSwapSequences sequences; - const Summary full_seqs_summary(sequences.full, tester); - CHECK(full_seqs_summary.total_number_of_problems == 453); - CHECK( - full_seqs_summary.str == + CHECK(sequences.full.size() == 453); + std::string full_seq_str = "[248 equal (6088); 104 BETTER (4645 vs 4979): av 7% decr\n" - "101 WORSE (5893 vs 5451): av 8% incr]"); + "101 WORSE (5893 vs 5451): av 8% incr]"; // The fixed swap sequences have been optimised quite a lot already, - // so are probably quite close to optimal. 
- full_seqs_summary.check_overall_improvement(-0.653832); + // so are probably quite close to optimal (although we cannot know + // for sure without an exhaustive search; there is probably no known + // non-exponential time algorithm to find the optimal solution). + // So, (probably) getting within 1% of the optimal answer seems pretty good. + double full_seq_improvement = -0.653832; + + CHECK(sequences.partial.size() == 755); + std::string partial_seq_str = + "[455 equal (6487); 165 BETTER (7044 vs 7457): av 7% decr\n" + "135 WORSE (9124 vs 8604): av 6% incr]"; + double partial_seq_improvement = -0.474543; + + BestTsaTester tester; + + if (!TSGlobalTestParameters().run_long_tests) { + // The "long" tests take ~6 seconds on an ordinary 2021 Windows laptop. + // The reduced tests take ~50 milliseconds + // (and are also biased towards smaller problems, + // as the problem strings are sorted by length). + sequences.full.resize(40); + full_seq_str = + "[40 equal (231); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]"; + full_seq_improvement = 0; + + sequences.partial.resize(40); + partial_seq_str = + "[40 equal (166); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]"; + partial_seq_improvement = 0; + } + const Summary full_seqs_summary(sequences.full, tester); + CHECK(full_seqs_summary.total_number_of_problems == sequences.full.size()); + CHECK(full_seqs_summary.str == full_seq_str); + full_seqs_summary.check_overall_improvement(full_seq_improvement); const Summary partial_seqs_summary(sequences.partial, tester); - CHECK(partial_seqs_summary.total_number_of_problems == 755); CHECK( - partial_seqs_summary.str == - "[455 equal (6487); 165 BETTER (7044 vs 7457): av 7% decr\n" - "135 WORSE (9124 vs 8604): av 6% incr]"); - - partial_seqs_summary.check_overall_improvement(-0.474543); + partial_seqs_summary.total_number_of_problems == + sequences.partial.size()); + CHECK(partial_seqs_summary.str == partial_seq_str); + partial_seqs_summary.check_overall_improvement(partial_seq_improvement); } // Now we want to solve complete problems; this is one of @@ -261,13 +286,23 @@ class StatisticsGrouper { } // namespace SCENARIO("Best TSA : solve complete problems") { - const FixedCompleteSolutions complete_solutions; + FixedCompleteSolutions complete_solutions; + + // It's a map, with key the architecture name; this is the number + // of architectures, not problems. + CHECK(complete_solutions.solutions.size() == 21); + vector sizes; + for (const auto& entry : complete_solutions.solutions) { + sizes.push_back(entry.second.size()); + } + CHECK(sizes == vector{49, 97, 49, 49, 97, 93, 45, 45, 45, 39, 41, + 49, 39, 100, 48, 28, 22, 27, 49, 49, 38}); // For a good test, very different problems should not be amalgamated // in the statistics. Thus we determine the different categories using length // of encoding string, which presumably roughly corresponds to "problem size" // and problem hardness. - const vector expected_messages{ + vector expected_messages{ "[210 equal (1018); 19 BETTER (84 vs 111): av 24% decr\n" "2 WORSE (19 vs 15): av 26% incr]", @@ -283,6 +318,38 @@ SCENARIO("Best TSA : solve complete problems") { "[8 equal (1470); 164 BETTER (25183 vs 27141): av 6% decr\n" "44 WORSE (8722 vs 8384): av 3% incr]"}; + double expected_improvement = 3.25087; + + if (!TSGlobalTestParameters().run_long_tests) { + // The "long" tests take ~12 seconds on an ordinary 2021 Windows laptop. + // The reduced tests take ~700 milliseconds. 
+ for (auto& entry : complete_solutions.solutions) { + auto reduced_size = entry.second.size() / 10; + if (reduced_size < 4) { + reduced_size = 4; + } + if (reduced_size < entry.second.size()) { + entry.second.resize(reduced_size); + } + } + expected_messages = vector{ + "[18 equal (62); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", + + "[17 equal (82); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", + + "[12 equal (119); 2 BETTER (15 vs 18): av 16% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", + + "[6 equal (149); 6 BETTER (164 vs 173): av 5% decr\n" + "4 WORSE (115 vs 110): av 5% incr]", + + "[4 equal (163); 10 BETTER (535 vs 571): av 5% decr\n" + "5 WORSE (288 vs 273): av 5% incr]"}; + expected_improvement = 1.62791; + } + vector problem_sizes; for (const auto& entry : complete_solutions.solutions) { REQUIRE(entry.second.size() >= 2); @@ -311,7 +378,7 @@ SCENARIO("Best TSA : solve complete problems") { } // A positive result is good; the fixed complete problems are DIRECTLY // comparing our TSA with the solver used to generate them. - grouper.check_overall_improvement(3.25087); + grouper.check_overall_improvement(expected_improvement); } } // namespace tests diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 0c376130f4..0ac50ebe3c 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -38,6 +38,8 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/Graphs/test_DirectedGraph.cpp ${TKET_TESTS_DIR}/Graphs/test_ArticulationPoints.cpp ${TKET_TESTS_DIR}/Graphs/test_TreeSearch.cpp + # NOTE: For testing, it is easier to combine TokenSwapping + # and TokenSwappingWithArch tests together. ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedCompleteSolutions.cpp ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedSwapSequences.cpp ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/NeighboursFromEdges.cpp From f1e2c0b1568c731bc283398e4ae066b8112d9b0b Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 18:55:55 +0000 Subject: [PATCH 092/146] add test_DebugFunctions.cpp and remove test coverage exclusion --- .../TestUtils/DebugFunctions.cpp | 2 - .../TestUtils/test_DebugFunctions.cpp | 44 +++++++++++++++++++ tket/tests/tkettestsfiles.cmake | 1 + 3 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 tket/tests/TokenSwapping/TestUtils/test_DebugFunctions.cpp diff --git a/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp index 70070c8889..bc7aa5d063 100644 --- a/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp +++ b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp @@ -19,7 +19,6 @@ namespace tket { namespace tsa_internal { -// GCOVR_EXCL_START std::string str(const VertexMapping& vertex_mapping) { std::stringstream ss; ss << "VM:"; @@ -28,7 +27,6 @@ std::string str(const VertexMapping& vertex_mapping) { } return ss.str(); } -// GCOVR_EXCL_STOP std::string str(const SwapList& swaps) { return str(swaps.to_vector()); } diff --git a/tket/tests/TokenSwapping/TestUtils/test_DebugFunctions.cpp b/tket/tests/TokenSwapping/TestUtils/test_DebugFunctions.cpp new file mode 100644 index 0000000000..47d89eefa6 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/test_DebugFunctions.cpp @@ -0,0 +1,44 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "DebugFunctions.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("debug functions - string functions") { + const VertexMapping vm{{0, 1}, {1, 2}, {3, 5}}; + CHECK(str(vm) == "VM: 0->1 1->2 3->5 "); + + vector swaps_vect; + swaps_vect.push_back(get_swap(111, 222)); + swaps_vect.push_back(get_swap(5555, 4444)); + const auto swaps_vect_str = str(swaps_vect); + CHECK(swaps_vect_str == " (111,222) (4444,5555) "); + + SwapList swaps; + for (const auto& swap : swaps_vect) { + swaps.push_back(swap); + } + CHECK(swaps_vect_str == str(swaps)); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 0ac50ebe3c..6339ef1224 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -59,6 +59,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ProblemGeneration.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/TestStatsStructs.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/test_DebugFunctions.cpp ${TKET_TESTS_DIR}/TokenSwapping/TSAUtils/test_SwapFunctions.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_BestTsaFixedSwapSequences.cpp From de2f9635bb122ec6547dc86155cd73da29c04e1c Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Tue, 15 Feb 2022 19:26:59 +0000 Subject: [PATCH 093/146] Remove unreachable code. 
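
The trailing "return result;" after the "Should be impossible to reach here"
comment in VectorListHybrid::overwrite_interval can never execute, so it is
replaced with a plain TKET_ASSERT(false).
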
--- .../TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp index 4abf5fec1e..c604b1e2a9 100644 --- a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -490,8 +490,7 @@ OverwriteIntervalResult VectorListHybrid::overwrite_interval( m_links_data.next(result.final_overwritten_element_id); } // Should be impossible to reach here - TKET_ASSERT(!"VectorListHybrid::overwrite_interval"); - return result; + TKET_ASSERT(false); } template From d7bbd01adf9759adae0aabf1afd88a8678117f06 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 10:57:59 +0000 Subject: [PATCH 094/146] update architecture binder --- pytket/binders/architecture.cpp | 39 ++++++++++++++++----------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp index 95f63acd58..e083f786e3 100644 --- a/pytket/binders/architecture.cpp +++ b/pytket/binders/architecture.cpp @@ -28,7 +28,11 @@ using json = nlohmann::json; namespace tket { PYBIND11_MODULE(architecture, m) { - py::class_( + py::class_>( + m, "NodeGraph", + "Abstract class for describing a device connectivity graph."); + + py::class_>( m, "Architecture", "Class describing the connectivity of qubits on a general device.") .def( @@ -40,26 +44,11 @@ PYBIND11_MODULE(architecture, m) { "operations", py::arg("connections")) .def( - py::init> &>(), + py::init>>(), "The constructor for an architecture with connectivity " "between qubits.\n\n:param connections: A list of pairs " "representing Nodes that can perform two-qubit operations", py::arg("connections")) - .def( - "__repr__", - [](const Architecture &arc) { - return ""; - }) - .def( - "get_distance", &Architecture::get_distance, - "given two nodes in Architecture, " - "returns distance between them", - py::arg("node_0"), py::arg("node_1")) - .def( - "get_adjacent_nodes", &Architecture::get_neighbour_nodes, - "given a node, returns adjacent nodes in Architecture.", - py::arg("node")) .def_property_readonly( "nodes", &Architecture::get_all_nodes_vec, "Returns all nodes of architecture as Node objects.") @@ -86,8 +75,18 @@ PYBIND11_MODULE(architecture, m) { return ""; }) + .def( + "get_distance", &Architecture::get_distance, + "given two nodes in Architecture, " + "returns distance between them", + py::arg("node_0"), py::arg("node_1")) + .def( + "get_adjacent_nodes", &Architecture::get_neighbour_nodes, + "given a node, returns adjacent nodes in Architecture.", + py::arg("node")) .def(py::self == py::self); - py::class_, Architecture>( + + py::class_, Architecture, graphs::AbstractGraph>( m, "SquareGrid", "Architecture class for qubits arranged in a square lattice of " "given number of rows and columns. 
Qubits are arranged with qubits " @@ -136,7 +135,7 @@ PYBIND11_MODULE(architecture, m) { ", columns=" + std::to_string(arc.get_columns()) + ", layers=" + std::to_string(arc.get_layers()) + ">"; }); - py::class_, Architecture>( + py::class_, Architecture, graphs::AbstractGraph>( m, "RingArch", "Architecture class for number of qubits arranged in a ring.") .def( @@ -147,7 +146,7 @@ PYBIND11_MODULE(architecture, m) { .def("__repr__", [](const RingArch &arc) { return ""; }); - py::class_( + py::class_, graphs::AbstractGraph>( m, "FullyConnected", "An architecture with full connectivity between qubits.") .def( From c6259aae435e8629cb0e25c7edd29d34637c750d Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 11:30:51 +0000 Subject: [PATCH 095/146] Revert "update architecture binder" This reverts commit d7bbd01adf9759adae0aabf1afd88a8678117f06. --- pytket/binders/architecture.cpp | 39 +++++++++++++++++---------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp index e083f786e3..95f63acd58 100644 --- a/pytket/binders/architecture.cpp +++ b/pytket/binders/architecture.cpp @@ -28,11 +28,7 @@ using json = nlohmann::json; namespace tket { PYBIND11_MODULE(architecture, m) { - py::class_>( - m, "NodeGraph", - "Abstract class for describing a device connectivity graph."); - - py::class_>( + py::class_( m, "Architecture", "Class describing the connectivity of qubits on a general device.") .def( @@ -44,11 +40,26 @@ PYBIND11_MODULE(architecture, m) { "operations", py::arg("connections")) .def( - py::init>>(), + py::init> &>(), "The constructor for an architecture with connectivity " "between qubits.\n\n:param connections: A list of pairs " "representing Nodes that can perform two-qubit operations", py::arg("connections")) + .def( + "__repr__", + [](const Architecture &arc) { + return ""; + }) + .def( + "get_distance", &Architecture::get_distance, + "given two nodes in Architecture, " + "returns distance between them", + py::arg("node_0"), py::arg("node_1")) + .def( + "get_adjacent_nodes", &Architecture::get_neighbour_nodes, + "given a node, returns adjacent nodes in Architecture.", + py::arg("node")) .def_property_readonly( "nodes", &Architecture::get_all_nodes_vec, "Returns all nodes of architecture as Node objects.") @@ -75,18 +86,8 @@ PYBIND11_MODULE(architecture, m) { return ""; }) - .def( - "get_distance", &Architecture::get_distance, - "given two nodes in Architecture, " - "returns distance between them", - py::arg("node_0"), py::arg("node_1")) - .def( - "get_adjacent_nodes", &Architecture::get_neighbour_nodes, - "given a node, returns adjacent nodes in Architecture.", - py::arg("node")) .def(py::self == py::self); - - py::class_, Architecture, graphs::AbstractGraph>( + py::class_, Architecture>( m, "SquareGrid", "Architecture class for qubits arranged in a square lattice of " "given number of rows and columns. 
Qubits are arranged with qubits " @@ -135,7 +136,7 @@ PYBIND11_MODULE(architecture, m) { ", columns=" + std::to_string(arc.get_columns()) + ", layers=" + std::to_string(arc.get_layers()) + ">"; }); - py::class_, Architecture, graphs::AbstractGraph>( + py::class_, Architecture>( m, "RingArch", "Architecture class for number of qubits arranged in a ring.") .def( @@ -146,7 +147,7 @@ PYBIND11_MODULE(architecture, m) { .def("__repr__", [](const RingArch &arc) { return ""; }); - py::class_, graphs::AbstractGraph>( + py::class_( m, "FullyConnected", "An architecture with full connectivity between qubits.") .def( From 7533f9019a8d8ce92f80ee629797b6a427219f47 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 11:34:28 +0000 Subject: [PATCH 096/146] formatting, remove nodegraph test --- pytket/tests/architecture_test.py | 5 +---- tket/src/Predicates/include/Predicates/PassGenerators.hpp | 3 ++- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/pytket/tests/architecture_test.py b/pytket/tests/architecture_test.py index a73e9c2c4b..980e1e4410 100644 --- a/pytket/tests/architecture_test.py +++ b/pytket/tests/architecture_test.py @@ -13,7 +13,7 @@ # limitations under the License. from pytket.circuit import Node -from pytket.architecture import NodeGraph, Architecture, SquareGrid, FullyConnected +from pytket.architecture import Architecture, SquareGrid, FullyConnected def test_architectures() -> None: @@ -72,13 +72,10 @@ def test_fully_connected() -> None: def test_arch_types() -> None: arch = Architecture([(0, 1)]) assert isinstance(arch, Architecture) - assert isinstance(arch, NodeGraph) fc = FullyConnected(2) assert isinstance(fc, FullyConnected) - assert isinstance(fc, NodeGraph) sg = SquareGrid(2, 2, 2) assert isinstance(sg, SquareGrid) - assert isinstance(sg, NodeGraph) if __name__ == "__main__": test_architectures() diff --git a/tket/src/Predicates/include/Predicates/PassGenerators.hpp b/tket/src/Predicates/include/Predicates/PassGenerators.hpp index b036147368..704e06b15d 100644 --- a/tket/src/Predicates/include/Predicates/PassGenerators.hpp +++ b/tket/src/Predicates/include/Predicates/PassGenerators.hpp @@ -51,7 +51,8 @@ std::vector object */ PassPtr gen_full_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, const std::vector& config); -PassPtr gen_default_mapping_pass(const Architecture& arc, bool delay_measures = true); +PassPtr gen_default_mapping_pass( + const Architecture& arc, bool delay_measures = true); PassPtr gen_cx_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, const std::vector& config, bool directed_cx, From 220a7b03a7b39ce45c7287f01ff39f42f23b8a21 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 11:50:05 +0000 Subject: [PATCH 097/146] update architecture and mapping tests --- pytket/binders/architecture.cpp | 2 +- pytket/tests/architecture_test.py | 3 ++- pytket/tests/mapping_test.py | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp index 95f63acd58..823bc1455d 100644 --- a/pytket/binders/architecture.cpp +++ b/pytket/binders/architecture.cpp @@ -28,7 +28,7 @@ using json = nlohmann::json; namespace tket { PYBIND11_MODULE(architecture, m) { - py::class_( + py::class_>( m, "Architecture", "Class describing the connectivity of qubits on a general device.") .def( diff --git a/pytket/tests/architecture_test.py b/pytket/tests/architecture_test.py index 980e1e4410..765fb6a9c0 100644 --- 
a/pytket/tests/architecture_test.py +++ b/pytket/tests/architecture_test.py @@ -77,8 +77,9 @@ def test_arch_types() -> None: sg = SquareGrid(2, 2, 2) assert isinstance(sg, SquareGrid) + if __name__ == "__main__": test_architectures() test_architecture_eq() test_fully_connected() - test_arch_types() \ No newline at end of file + test_arch_types() diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index 3aea34b857..69dc265878 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -35,7 +35,8 @@ def route_subcircuit_func( relabelling_map = dict() for qb in circuit.qubits: - unused_nodes.remove(qb) + if qb in unused_nodes: + unused_nodes.remove(qb) for qb in circuit.qubits: if qb not in architecture.nodes: From 1bd2dfd9738a6ec1eb5b0bfdca6267350e575f5a Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 11:55:30 +0000 Subject: [PATCH 098/146] add architecture_aware_synthesis_test.py --- .../architecture_aware_synthesis_test.py | 209 ++++++++++++++++++ 1 file changed, 209 insertions(+) create mode 100644 pytket/tests/architecture_aware_synthesis_test.py diff --git a/pytket/tests/architecture_aware_synthesis_test.py b/pytket/tests/architecture_aware_synthesis_test.py new file mode 100644 index 0000000000..73f6ded67f --- /dev/null +++ b/pytket/tests/architecture_aware_synthesis_test.py @@ -0,0 +1,209 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
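+
+# A minimal sketch of the pattern these tests follow (only the API already
+# exercised in this file is used): build a small line Architecture, apply
+# AASRouting via a CompilationUnit, and check the routed circuit respects
+# the architecture's connectivity:
+#
+#     arc = Architecture([[0, 1], [1, 2]])
+#     cu = CompilationUnit(Circuit(3).CX(0, 2))
+#     assert AASRouting(arc, lookahead=1).apply(cu)
+#     assert cu.circuit.valid_connectivity(arc, False, True)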
+ +from pytket.circuit import Circuit # type: ignore +from pytket.architecture import Architecture # type: ignore +from pytket.passes import AASRouting, CNotSynthType # type: ignore +from pytket.predicates import CompilationUnit # type: ignore + + +def test_AAS() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_2() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc) + assert pass1.apply(circ) + + +def test_AAS_3() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_4() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc) + assert pass1.apply(circ) + + +def test_AAS_5() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_6() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc) + assert pass1.apply(circ) + + +def test_AAS_7() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_8() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.CX(0, 1) + circ.H(0) + circ.Z(1) + circ.CX(0, 3) + circ.Rx(1.5, 3) + circ.CX(2, 4) + circ.X(2) + circ.CX(1, 4) + circ.CX(0, 4) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_9() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8]]) + circ = Circuit(9) + circ.CX(0, 8).CX(8, 1).CX(1, 7).CX(7, 2).CX(2, 6).CX(6, 3).CX(3, 5).CX(5, 4) + circ.Rz(0.5, 4) + pass1 = AASRouting(arc, lookahead=2) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() < 56 + + +def test_AAS_10() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=2) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() < 33 + + +def test_AAS_11() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.SWAP) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 119 + + +def test_AAS_12() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], 
[4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.HamPath) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 36 + + +def test_AAS_13() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 28 + + +def test_AAS_14() -> None: + arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) + circ = Circuit(3).CZ(0, 1) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 3 + + +def test_AAS_15() -> None: + arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) + circ = Circuit(2).CZ(0, 1) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 3 + + +if __name__ == "__main__": + test_AAS() + test_AAS_2() + test_AAS_3() + test_AAS_4() + test_AAS_5() + test_AAS_6() + test_AAS_7() + test_AAS_8() + test_AAS_9() + test_AAS_10() + test_AAS_11() + test_AAS_12() + test_AAS_13() + test_AAS_14() + test_AAS_15() From 770ac41b838d64de2fce30355fc8fcc31461cfb6 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 12:04:48 +0000 Subject: [PATCH 099/146] Create placement_test.py --- pytket/tests/placement_test.py | 173 +++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 pytket/tests/placement_test.py diff --git a/pytket/tests/placement_test.py b/pytket/tests/placement_test.py new file mode 100644 index 0000000000..8ce0f946b8 --- /dev/null +++ b/pytket/tests/placement_test.py @@ -0,0 +1,173 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
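+
+# A minimal sketch of the placement-then-routing pattern used throughout
+# these tests (only the API already exercised in this file is assumed):
+# ask a placement object for a qubit-to-node assignment, apply it, then
+# route on the same Architecture:
+#
+#     arc = Architecture([(0, 1), (1, 2)])
+#     circ = Circuit(3).CX(0, 2)
+#     GraphPlacement(arc).place(circ)
+#     MappingManager(arc).route_circuit(circ, [LexiRouteRoutingMethod()])
+#     assert circ.valid_connectivity(arc, False)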
+ +from pathlib import Path +from pytket import Circuit +from pytket.circuit import Node, Qubit +from pytket.architecture import Architecture +from pytket.placement import ( + Placement, + LinePlacement, + GraphPlacement, + NoiseAwarePlacement, + place_with_map, +) +from pytket.mapping import MappingManager, LexiRouteRoutingMethod +import json + + +def test_placements() -> None: + test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] + test_architecture = Architecture(test_coupling) + circ = Circuit(6) + for pair in test_coupling: + circ.CX(pair[0], pair[1]) + circ_qbs = circ.qubits + base_pl = Placement(test_architecture) + line_pl = LinePlacement(test_architecture) + graph_pl = GraphPlacement(test_architecture) + base_placed = circ.copy() + line_placed = circ.copy() + graph_placed = circ.copy() + + base_map = base_pl.get_placement_map(circ) + line_map = line_pl.get_placement_map(circ) + graph_map = graph_pl.get_placement_map(circ) + + assert base_map != line_map + assert base_map != graph_map + assert circ.qubits == circ_qbs + + base_pl.place(base_placed) + line_pl.place(line_placed) + graph_pl.place(graph_placed) + + assert line_placed.qubits[0] == line_map[circ_qbs[0]] + assert line_placed.qubits[1] == line_map[circ_qbs[1]] + assert line_placed.qubits[2] == line_map[circ_qbs[2]] + + assert base_placed.qubits[0] == base_map[circ_qbs[0]] + assert base_placed.qubits[1] == base_map[circ_qbs[1]] + assert base_placed.qubits[2] == base_map[circ_qbs[2]] + + assert graph_placed.qubits[0] == graph_map[circ_qbs[0]] + assert graph_placed.qubits[1] == graph_map[circ_qbs[1]] + assert graph_placed.qubits[2] == graph_map[circ_qbs[2]] + + assert circ_qbs != base_placed.qubits + assert circ_qbs != line_placed.qubits + assert circ_qbs != graph_placed.qubits + + mm = MappingManager(test_architecture) + mm.route_circuit(base_placed, [LexiRouteRoutingMethod()]) + mm.route_circuit(line_placed, [LexiRouteRoutingMethod()]) + mm.route_circuit(graph_placed, [LexiRouteRoutingMethod()]) + + assert base_placed.valid_connectivity(test_architecture, False) + assert line_placed.valid_connectivity(test_architecture, False) + assert graph_placed.valid_connectivity(test_architecture, False) + + +def test_placements_serialization() -> None: + with open( + Path(__file__).resolve().parent / "json_test_files" / "placements.json", "r" + ) as f: + dict = json.load(f) + base_pl_serial = dict["base_placement"] + line_pl_serial = dict["line_placement"] + graph_pl_serial = dict["graph_placement"] + noise_pl_serial = dict["noise_placement"] + + assert Placement.from_dict(base_pl_serial).to_dict() == base_pl_serial + assert LinePlacement.from_dict(line_pl_serial).to_dict() == line_pl_serial + assert GraphPlacement.from_dict(graph_pl_serial).to_dict() == graph_pl_serial + assert NoiseAwarePlacement.from_dict(noise_pl_serial).to_dict() == noise_pl_serial + + +def test_placement_config() -> None: + test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] + test_architecture = Architecture(test_coupling) + test_pl = GraphPlacement(test_architecture) + test_circuit = Circuit(6) + test_circuit.CX(0, 1) + test_circuit.CX(2, 3) + test_circuit.CX(4, 3) + test_circuit.CX(2, 4) + test_circuit.CX(3, 5) + test_circuit.CX(0, 5) + circ1 = test_circuit.copy() + circ2 = test_circuit.copy() + map1 = test_pl.get_placement_map(test_circuit) + test_pl.place(circ1) + test_pl.modify_config( + max_matches=1, depth_limit=0, max_interaction_edges=2, timeout=100 + ) + map2 = test_pl.get_placement_map(test_circuit) + test_pl.place(circ2) + assert map1 != 
map2 + + mm = MappingManager(test_architecture) + mm.route_circuit(circ1, [LexiRouteRoutingMethod()]) + mm.route_circuit(circ2, [LexiRouteRoutingMethod()]) + assert circ1.n_gates < circ2.n_gates + + +def test_convert_index_mapping() -> None: + test_circuit = Circuit(6) + test_circuit.CX(0, 1) + test_circuit.CX(2, 3) + test_circuit.CX(4, 3) + test_circuit.CX(2, 4) + test_circuit.CX(3, 5) + test_circuit.CX(0, 5) + + c0 = test_circuit.copy() + c1 = test_circuit.copy() + + index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} + uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} + circ_qbs = test_circuit.qubits + assert uid_map[circ_qbs[0]] == Node(1) + assert uid_map[circ_qbs[1]] == Node(2) + assert uid_map[circ_qbs[2]] == Node(0) + assert uid_map[circ_qbs[3]] == Node(4) + assert uid_map[circ_qbs[4]] == Node(3) + + place_with_map(test_circuit, uid_map) + + new_circ_qbs = test_circuit.qubits + assert circ_qbs != new_circ_qbs + assert new_circ_qbs[0] == Node(0) + assert new_circ_qbs[1] == Node(1) + assert new_circ_qbs[2] == Node(2) + assert new_circ_qbs[3] == Node(3) + assert new_circ_qbs[4] == Node(4) + assert new_circ_qbs[5] == Qubit("unplaced", 0) + + index_map_0 = {0: 5, 1: 4, 2: 0, 3: 1, 4: 3, 5: 2} + index_map_1 = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3, 5: 5} + uid_0 = {Qubit(i): Node(j) for i, j in index_map_0.items()} + uid_1 = {Qubit(i): Node(j) for i, j in index_map_1.items()} + assert uid_0 != uid_1 + + place_with_map(c0, uid_0) + place_with_map(c1, uid_1) + assert c0 != c1 + + +if __name__ == "__main__": + test_placements() + test_placements_serialization() + test_placement_config() + test_convert_index_mapping() From 9fbbd951e20d39980fb536cad509cc26d4f0404a Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 13:21:53 +0000 Subject: [PATCH 100/146] Update range of python mapping tests --- .../architecture_aware_synthesis_test.py | 14 +- pytket/tests/mapping_test.py | 26 +- pytket/tests/placement_test.py | 88 ++++++- pytket/tests/transform_test.py | 235 +++++++++++++++++- 4 files changed, 353 insertions(+), 10 deletions(-) diff --git a/pytket/tests/architecture_aware_synthesis_test.py b/pytket/tests/architecture_aware_synthesis_test.py index 73f6ded67f..62c0540f2f 100644 --- a/pytket/tests/architecture_aware_synthesis_test.py +++ b/pytket/tests/architecture_aware_synthesis_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pytket.circuit import Circuit # type: ignore +from pytket.circuit import Circuit, OpType # type: ignore from pytket.architecture import Architecture # type: ignore from pytket.passes import AASRouting, CNotSynthType # type: ignore from pytket.predicates import CompilationUnit # type: ignore @@ -191,6 +191,17 @@ def test_AAS_15() -> None: assert out_circ.depth() == 3 +def test_noncontiguous_arc_phase_poly() -> None: + # testing non-contiguous ascending named nodes + arc = Architecture([[0, 2]]) + pass1 = AASRouting(arc, lookahead=1) + c = Circuit(2).H(0).H(1) + pass1.apply(c) + assert c.n_gates_of_type(OpType.H) == 2 + assert c.n_gates_of_type(OpType.CX) == 0 + assert c.n_gates_of_type(OpType.CX) == 0 + + if __name__ == "__main__": test_AAS() test_AAS_2() @@ -207,3 +218,4 @@ def test_AAS_15() -> None: test_AAS_13() test_AAS_14() test_AAS_15() + test_noncontiguous_arc_phase_poly() diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index 69dc265878..cc2cfcc82c 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -15,7 +15,8 @@ from pytket.mapping import MappingManager, RoutingMethodCircuit, LexiRouteRoutingMethod # type: ignore from pytket.architecture import Architecture # type: ignore from pytket import Circuit, OpType -from pytket.circuit import Node # type: ignore +from pytket.circuit import Node, Qubit # type: ignore +from pytket.placement import Placement from typing import Tuple, Dict @@ -191,7 +192,30 @@ def test_RoutingMethodCircuit_custom_list() -> None: assert routed_commands[4].qubits == [nodes[1], nodes[2]] +def test_basic_mapping() -> None: + circ = Circuit(5) + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ.CX(0, 1) + circ.CX(0, 3) + circ.CX(2, 4) + circ.CX(1, 4) + circ.CX(0, 4) + + init_map = dict() + init_map[Qubit(0)] = Node(0) + init_map[Qubit(1)] = Node(1) + init_map[Qubit(2)] = Node(2) + init_map[Qubit(3)] = Node(3) + init_map[Qubit(4)] = Node(4) + pl = Placement(arc) + pl.place_with_map(circ, init_map) + MappingManager(arc).route_circuit(circ, [LexiRouteRoutingMethod(50)]) + assert circ.valid_connectivity(arc, False) + assert len(circ.get_commands()) == 10 + + if __name__ == "__main__": test_LexiRouteRoutingMethod() test_RoutingMethodCircuit_custom() test_RoutingMethodCircuit_custom_list() + test_basic_mapping() diff --git a/pytket/tests/placement_test.py b/pytket/tests/placement_test.py index 8ce0f946b8..2db42081be 100644 --- a/pytket/tests/placement_test.py +++ b/pytket/tests/placement_test.py @@ -13,17 +13,20 @@ # limitations under the License. 
from pathlib import Path -from pytket import Circuit -from pytket.circuit import Node, Qubit -from pytket.architecture import Architecture -from pytket.placement import ( +from pytket import Circuit # type: ignore +from pytket.circuit import Node, Qubit # type: ignore +from pytket.architecture import Architecture # type: ignore +from pytket.placement import ( # type: ignore Placement, LinePlacement, GraphPlacement, NoiseAwarePlacement, place_with_map, ) -from pytket.mapping import MappingManager, LexiRouteRoutingMethod +from pytket.passes import PauliSimp, DefaultMappingPass # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore +from pytket.qasm import circuit_from_qasm # type: ignore + import json @@ -166,8 +169,83 @@ def test_convert_index_mapping() -> None: assert c0 != c1 +def test_place_with_map_twice() -> None: + # TKET-671 + c = Circuit(6).CX(0, 1).CX(2, 3).CX(4, 3).CX(2, 4).CX(3, 5).CX(0, 5) + + index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} + uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} + c_qbs = c.qubits + assert uid_map[c_qbs[0]] == Node(1) + assert uid_map[c_qbs[1]] == Node(2) + assert uid_map[c_qbs[2]] == Node(0) + assert uid_map[c_qbs[3]] == Node(4) + assert uid_map[c_qbs[4]] == Node(3) + + assert all(qb.reg_name == "q" for qb in c.qubits) + place_with_map(c, uid_map) + assert all(qb.reg_name in ["node", "unplaced"] for qb in c.qubits) + place_with_map(c, uid_map) + assert all(qb.reg_name == "unplaced" for qb in c.qubits) + + +def test_big_placement() -> None: + # TKET-1275 + c = circuit_from_qasm( + Path(__file__).resolve().parent / "qasm_test_files" / "test14.qasm" + ) + arc = Architecture( + [ + [0, 1], + [0, 14], + [1, 0], + [1, 2], + [1, 13], + [2, 1], + [2, 3], + [2, 12], + [3, 2], + [3, 4], + [3, 11], + [4, 3], + [4, 5], + [4, 10], + [5, 4], + [5, 6], + [5, 9], + [6, 5], + [6, 8], + [7, 8], + [8, 6], + [8, 7], + [8, 9], + [9, 5], + [9, 8], + [9, 10], + [10, 4], + [10, 9], + [10, 11], + [11, 3], + [11, 10], + [11, 12], + [12, 2], + [12, 11], + [12, 13], + [13, 1], + [13, 12], + [13, 14], + [14, 0], + [14, 13], + ] + ) + assert PauliSimp().apply(c) + assert DefaultMappingPass(arc).apply(c) + + if __name__ == "__main__": test_placements() test_placements_serialization() test_placement_config() test_convert_index_mapping() + test_place_with_map_twice() + test_big_placement() diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index 14d5d3afd8..8421d4aea1 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -13,12 +13,26 @@ # limitations under the License. 
from pathlib import Path -from pytket.circuit import Circuit, OpType, PauliExpBox # type: ignore +from pytket.circuit import Circuit, OpType, PauliExpBox, Node, Qubit # type: ignore from pytket.pauli import Pauli # type: ignore -from pytket.passes import RemoveRedundancies, KAKDecomposition, ThreeQubitSquash, CommuteThroughMultis, PauliSquash, FullPeepholeOptimise, GlobalisePhasedX # type: ignore -from pytket.predicates import CompilationUnit # type: ignore +from pytket.passes import ( # type: ignore + RemoveRedundancies, + KAKDecomposition, + CommuteThroughMultis, + PauliSquash, + FullPeepholeOptimise, + DefaultMappingPass, + FullMappingPass, + RoutingPass, + PlacementPass, + CXMappingPass, +) +from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore from pytket.transform import Transform, CXConfigType, PauliSynthStrat # type: ignore from pytket.qasm import circuit_from_qasm +from pytket.architecture import Architecture # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore +from pytket.placement import Placement, GraphPlacement, LinePlacement, NoiseAwarePlacement # type: ignore from sympy import Symbol # type: ignore import numpy as np @@ -729,6 +743,213 @@ def test_full_peephole_optimise() -> None: assert n_cx1 < n_cz +def test_decompose_swap_to_cx() -> None: + circ = Circuit(5) + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ.CX(0, 1) + circ.CX(0, 3) + circ.CX(2, 4) + circ.CX(1, 4) + circ.CX(0, 4) + + init_map = dict() + init_map[Qubit(0)] = Node(0) + init_map[Qubit(1)] = Node(1) + init_map[Qubit(2)] = Node(2) + init_map[Qubit(3)] = Node(3) + init_map[Qubit(4)] = Node(4) + + pl = Placement(arc) + pl.place_with_map(circ, init_map) + + MappingManager(arc).route_circuit(circ, [LexiRouteRoutingMethod()]) + assert circ.valid_connectivity(arc, False) + Transform.DecomposeSWAPtoCX(arc).apply(circ) + assert len(circ.get_commands()) == 20 + Transform.DecomposeCXDirected(arc).apply(circ) + assert circ.valid_connectivity(arc, True) + assert len(circ.get_commands()) == 40 + + +def test_noncontiguous_DefaultMappingPass_arc() -> None: + arc = Architecture([[0, 2]]) + pass1 = DefaultMappingPass(arc) + c = Circuit(2) + pass1.apply(c) + + +def test_RoutingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + circ.CX(0, 1) + circ.CX(0, 3) + circ.CX(2, 4) + circ.CX(1, 4) + circ.CX(1, 3) + circ.CX(1, 2) + cu_0 = CompilationUnit(circ) + placer = GraphPlacement(arc) + p_pass = PlacementPass(placer) + r_pass_0 = RoutingPass(arc) + p_pass.apply(cu_0) + r_pass_0.apply(cu_0) + out_circ_0 = cu_0.circuit + assert out_circ_0.valid_connectivity(arc, False, True) + + +def test_FullMappingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + circ.CX(0, 1).CX(0, 3).CX(2, 4).CX(1, 4).CX(0, 4).CX(2, 1).CX(3, 0) + cu_0 = CompilationUnit(circ) + cu_1 = CompilationUnit(circ) + gp_placer = GraphPlacement(arc) + lp_placer = LinePlacement(arc) + m_pass_0 = FullMappingPass(arc, gp_placer, [LexiRouteRoutingMethod()]) + m_pass_1 = FullMappingPass(arc, lp_placer, [LexiRouteRoutingMethod()]) + m_pass_0.apply(cu_0) + m_pass_1.apply(cu_1) + out_circ_0 = cu_0.circuit + out_circ_1 = cu_1.circuit + assert out_circ_0.n_gates < out_circ_1.n_gates + assert out_circ_0.valid_connectivity(arc, False, True) + assert out_circ_1.valid_connectivity(arc, False, True) + + +def test_CXMappingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + 
circ.Y(4).CX(0, 1).S(3).CX(0, 3).H(0).CX(2, 4).CX(1, 4).Y(1).CX(0, 4).CX(2, 1).Z( + 2 + ).CX(3, 0).CX(2, 0).CX(1, 3) + circ.measure_all() + cu_0 = CompilationUnit(circ) + cu_1 = CompilationUnit(circ) + gp_placer = GraphPlacement(arc) + lp_placer = LinePlacement(arc) + m_pass_0 = CXMappingPass( + arc, gp_placer, swap_lookahead=10, bridge_interactions=10, directed_cx=True + ) + m_pass_1 = CXMappingPass(arc, lp_placer, delay_measures=False) + m_pass_0.apply(cu_0) + m_pass_1.apply(cu_1) + out_circ_0 = cu_0.circuit + out_circ_1 = cu_1.circuit + + measure_pred = NoMidMeasurePredicate() + assert measure_pred.verify(cu_0.circuit) == True + assert measure_pred.verify(cu_1.circuit) == False + assert out_circ_0.valid_connectivity(arc, True) + assert out_circ_1.valid_connectivity(arc, False) + + +def test_DefaultMappingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + circ.Y(4).CX(0, 1).S(3).CX(0, 3).H(0).CX(2, 4).CX(1, 4).Y(1).CX(0, 4).CX(2, 1).Z( + 2 + ).CX(3, 0).CX(2, 0).CX(1, 3).CX(1, 2) + circ.measure_all() + cu_0 = CompilationUnit(circ) + cu_1 = CompilationUnit(circ) + m_pass_0 = DefaultMappingPass(arc, delay_measures=True) + m_pass_1 = DefaultMappingPass(arc, delay_measures=False) + m_pass_0.apply(cu_0) + m_pass_1.apply(cu_1) + out_circ_0 = cu_0.circuit + out_circ_1 = cu_1.circuit + measure_pred = NoMidMeasurePredicate() + assert measure_pred.verify(out_circ_0) == True + assert measure_pred.verify(out_circ_1) == False + assert out_circ_0.valid_connectivity(arc, False, True) + assert out_circ_1.valid_connectivity(arc, False, True) + + +def test_CXMappingPass_correctness() -> None: + # TKET-1045 + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + placer = NoiseAwarePlacement(arc) + p = CXMappingPass(arc, placer, directed_cx=True, delay_measures=True) + c = Circuit(3).CX(0, 1).CX(1, 2).CCX(2, 1, 0).CY(1, 0).CY(2, 1) + cu = CompilationUnit(c) + p.apply(cu) + c1 = cu.circuit + u1 = c1.get_unitary() + assert all(np.isclose(abs(x), 0) or np.isclose(abs(x), 1) for x in u1.flatten()) + + +def test_CXMappingPass_terminates() -> None: + # TKET-1376 + c = circuit_from_qasm( + Path(__file__).resolve().parent / "qasm_test_files" / "test13.qasm" + ) + arc = Architecture( + [ + [0, 1], + [1, 0], + [1, 2], + [1, 4], + [2, 1], + [2, 3], + [3, 2], + [3, 5], + [4, 1], + [4, 7], + [5, 3], + [5, 8], + [6, 7], + [7, 4], + [7, 6], + [7, 10], + [8, 5], + [8, 9], + [8, 11], + [9, 8], + [10, 7], + [10, 12], + [11, 8], + [11, 14], + [12, 10], + [12, 13], + [12, 15], + [13, 12], + [13, 14], + [14, 11], + [14, 13], + [14, 16], + [15, 12], + [15, 18], + [16, 14], + [16, 19], + [17, 18], + [18, 15], + [18, 17], + [18, 21], + [19, 16], + [19, 20], + [19, 22], + [20, 19], + [21, 18], + [21, 23], + [22, 19], + [22, 25], + [23, 21], + [23, 24], + [24, 23], + [24, 25], + [25, 22], + [25, 24], + [25, 26], + [26, 25], + ] + ) + placer = NoiseAwarePlacement(arc) + placer.modify_config(timeout=10000) + p = CXMappingPass(arc, placer, directed_cx=False, delay_measures=False) + res = p.apply(c) + assert res + + if __name__ == "__main__": test_remove_redundancies() test_reduce_singles() @@ -746,3 +967,11 @@ def test_full_peephole_optimise() -> None: test_implicit_swaps_1() test_implicit_swaps_2() test_implicit_swaps_3() + test_decompose_swap_to_cx() + test_noncontiguous_DefaultMappingPass_arc() + test_RoutingPass() + test_DefaultMappingPass() + test_CXMappingPass() + test_CXMappingPass_correctness() + test_CXMappingPass_terminates() + test_FullMappingPass() From 
f10be47fb4428ced905e9977fbddcd67ea2f4fc8 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 16:44:55 +0000 Subject: [PATCH 101/146] update routing test coverage --- .../tests/test_ArchitectureAwareSynthesis.cpp | 372 ++++++++++++++++++ tket/tests/test_LexiRoute.cpp | 246 +++++++++++- tket/tests/test_LexicographicalComparison.cpp | 14 + tket/tests/test_MappingFrontier.cpp | 14 + tket/tests/test_MappingManager.cpp | 14 + tket/tests/test_MappingVerification.cpp | 113 ++++++ tket/tests/test_MultiGateReorder.cpp | 13 + tket/tests/test_RoutingPasses.cpp | 358 +++++++++++++++++ tket/tests/tkettestsfiles.cmake | 3 + 9 files changed, 1146 insertions(+), 1 deletion(-) create mode 100644 tket/tests/test_ArchitectureAwareSynthesis.cpp create mode 100644 tket/tests/test_MappingVerification.cpp create mode 100644 tket/tests/test_RoutingPasses.cpp diff --git a/tket/tests/test_ArchitectureAwareSynthesis.cpp b/tket/tests/test_ArchitectureAwareSynthesis.cpp new file mode 100644 index 0000000000..6737af45c3 --- /dev/null +++ b/tket/tests/test_ArchitectureAwareSynthesis.cpp @@ -0,0 +1,372 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "Architecture/Architecture.hpp" +#include "Predicates/CompilerPass.hpp" +#include "Predicates/PassGenerators.hpp" +#include "testutil.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" + +namespace tket{ +using Connection = Architecture::Connection; +SCENARIO("Routing of aas example") { + GIVEN("aas routing - simple example") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example II") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {0, 
1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example III") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example IV") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example V") { + Architecture arc(std::vector{{Node(0), Node(1)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(2); + circ.add_op(OpType::H, {0}); + 
circ.add_op(OpType::H, {1}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example VI") { + Architecture arc(std::vector{{Node(0), Node(2)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(2); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + + const auto s = tket_sim::get_unitary(circ); + const auto s1 = tket_sim::get_unitary(result); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + s, s1, tket_sim::MatrixEquivalence::EQUAL)); + } + GIVEN("aas routing - simple example VII") { + Architecture arc(std::vector{ + {Node(0), Node(2)}, {Node(2), Node(4)}, {Node(4), Node(6)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + + const auto s = tket_sim::get_unitary(circ); + const auto s1 = tket_sim::get_unitary(result); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + s, s1, tket_sim::MatrixEquivalence::EQUAL)); + } + GIVEN("aas routing - simple example VIII") { + Architecture arc(std::vector{ + {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example IX, other gate set") { + Architecture arc(std::vector{ + {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + circ.add_op(OpType::X, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + 
circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + circ.add_op(OpType::X, {3}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing with measure") { + Architecture arc(std::vector{{Node(0), Node(2)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(2, 2); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + for (unsigned mes = 0; mes < 2; ++mes) { + circ.add_measure(mes, mes); + } + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + } + GIVEN("aas routing - circuit with fewer qubits then nodes in the arch") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(3); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.2, {1}); + circ.add_op(OpType::Rz, 0.3, {2}); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - circuit with fewer qubits then nodes in the arch II") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, + {Node(1), Node(2)}, + {Node(2), Node(3)}, + {Node(3), Node(4)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(3); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.2, {1}); + circ.add_op(OpType::Rz, 0.3, {2}); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } +} +} //namespace tket \ No newline at end of file diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index 1b24da5da9..77a74dee6d 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -1,12 +1,29 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" +#include "Mapping/Verification.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/PassLibrary.hpp" - +// #include "Transformations/Transform.hpp" +#include "Transformations/Decomposition.hpp" +#include "testutil.hpp" namespace tket { SCENARIO("Test LexiRoute::solve") { std::vector nodes = {Node("test_node", 0), Node("test_node", 1), @@ -653,4 +670,231 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { REQUIRE(circ.n_gates() == 88); } } + +SCENARIO( + "Check that an already solved routing problem will not add unecessary " + "swaps") { + GIVEN("A solved problem") { + Circuit test_circuit; + test_circuit.add_blank_wires(4); + add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); + + // Ring of size 4 + RingArch arc(4); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + test_circuit, {std::make_shared()})); + REQUIRE(test_circuit.n_gates() == 4); + } + GIVEN("A solved problem supplied with map and custom architecture") { + Circuit test_circuit; + test_circuit.add_blank_wires(4); + add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); + + Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 0}}); + Placement test_p(test_arc); + + qubit_mapping_t map_; + for (unsigned nn = 0; nn <= 3; ++nn) { + map_[Qubit(nn)] = Node(nn); + } + test_p.place_with_map(test_circuit, map_); + qubit_vector_t all_qs_post_place = test_circuit.all_qubits(); + + MappingManager mm(std::make_shared(test_arc)); + REQUIRE(!mm.route_circuit( + test_circuit, {std::make_shared()})); + + qubit_vector_t all_qs_post_solve = test_circuit.all_qubits(); + REQUIRE(all_qs_post_place == all_qs_post_solve); + REQUIRE(test_circuit.n_gates() == 4); + } +} + +SCENARIO("Empty Circuit test") { + GIVEN("An Empty Circuit") { + Circuit circ; + circ.add_blank_wires(4); + Architecture arc({{0, 1}, {1, 2}, {2, 3}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE( + !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(circ.n_gates() == 0); + } +} + +SCENARIO("Routing on circuit with no multi-qubit gates") { + GIVEN("A circuit with no multi-qubit gates") { + Circuit circ; + circ.add_blank_wires(4); + add_1qb_gates(circ, OpType::X, {0, 2}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::Y, {1}); + + unsigned orig_vertices = circ.n_vertices(); + Architecture arc({{0, 1}, {1, 2}, {2, 3}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE( + !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(orig_vertices - 8 == circ.n_gates()); + } +} + +SCENARIO("Test routing on a directed architecture with bidirectional edges") { + GIVEN("A simple two-qubit circuit") { + Circuit circ(2); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::CX, {0, 1}); + Architecture arc({{0, 1}, {1, 0}}); + Architecture arc2(std::vector>{{0, 1}}); + + // routing ignored bi directional edge and solves correctly + MappingManager mm(std::make_shared(arc)); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(circ.n_gates() == 2); + CHECK(respects_connectivity_constraints(circ, arc, false)); + } +} + +SCENARIO( + "Test routing on a directed architecture doesn't throw an error if " + "non-cx optype is presented") { + GIVEN( + "A simple two-qubit circuit with non-cx multi-qubit gates and a " + "directed architecture") { + Circuit circ(2); + circ.add_op(OpType::CU1, 0.5, {1, 0}); 
+ circ.add_op(OpType::CU1, 0.5, {0, 1}); + circ.add_op(OpType::CY, {1, 0}); + circ.add_op(OpType::CY, {0, 1}); + circ.add_op(OpType::CZ, {1, 0}); + circ.add_op(OpType::CZ, {0, 1}); + circ.add_op(OpType::CRz, 0.5, {1, 0}); + circ.add_op(OpType::CRz, 0.5, {0, 1}); + + Architecture arc(std::vector>{{0, 1}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(circ.n_gates() == 8); + } +} + +SCENARIO("Dense CX circuits route succesfully") { + GIVEN( + "Complex CX circuits for large directed architecture based off " + "IBMTokyo") { + Circuit circ(17); + for (unsigned x = 0; x < 17; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { // swap the way directed chain runs each time + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + Architecture arc( + {{0, 1}, {1, 2}, {2, 3}, {3, 4}, {0, 5}, {1, 6}, {1, 7}, + {2, 6}, {2, 7}, {3, 8}, {3, 9}, {4, 8}, {4, 9}, {5, 6}, + {5, 10}, {5, 11}, {6, 10}, {6, 11}, {6, 7}, {7, 12}, {7, 13}, + {7, 8}, {8, 12}, {8, 13}, {8, 9}, {10, 11}, {11, 16}, {11, 17}, + {11, 12}, {12, 16}, {12, 17}, {12, 13}, {13, 18}, {13, 19}, {13, 14}, + {14, 18}, {14, 19}, {15, 16}, {16, 17}, {17, 18}, {18, 19}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); + (Transforms::decompose_SWAP_to_CX() >> Transforms::decompose_BRIDGE_to_CX()) + .apply(circ); + + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } +} + +SCENARIO( + "Dense CX circuits route succesfully on undirected Ring with " + "placement.") { + GIVEN("Complex CX circuits, big ring") { + Circuit circ(29); + for (unsigned x = 0; x < 29; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + RingArch arc(29); + MappingManager mm(std::make_shared(arc)); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + } +} + +SCENARIO( + "Dense CX circuits route succesfully on smart placement unfriendly " + "architecture.") { + GIVEN("Complex CX circuits, big ring") { + Circuit circ(13); + for (unsigned x = 0; x < 13; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + Architecture arc( + {{0, 1}, + {2, 0}, + {2, 4}, + {6, 4}, + {8, 6}, + {8, 10}, + {12, 10}, + {3, 1}, + {3, 5}, + {7, 5}, + {7, 9}, + {11, 9}, + {11, 13}, + {12, 13}, + {6, 7}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + } +} + +SCENARIO("Empty circuits, with and without blank wires") { + GIVEN("An empty circuit with some qubits") { + Circuit circ(6); + RingArch arc(6); + MappingManager mm(std::make_shared(arc)); + REQUIRE( + !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(circ.depth() == 0); + REQUIRE(circ.n_gates() == 0); + REQUIRE(circ.n_qubits() == 6); + REQUIRE(!respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("An empty circuit with no qubits") { + Circuit circ(0); + RingArch arc(6); + MappingManager 
mm(std::make_shared(arc)); + REQUIRE( + !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(circ.depth() == 0); + REQUIRE(circ.n_gates() == 0); + REQUIRE(circ.n_qubits() == 0); + } +} + } // namespace tket \ No newline at end of file diff --git a/tket/tests/test_LexicographicalComparison.cpp b/tket/tests/test_LexicographicalComparison.cpp index fc421d2202..53f26a5973 100644 --- a/tket/tests/test_LexicographicalComparison.cpp +++ b/tket/tests/test_LexicographicalComparison.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp index bb33f0f095..414e807c81 100644 --- a/tket/tests/test_MappingFrontier.cpp +++ b/tket/tests/test_MappingFrontier.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include diff --git a/tket/tests/test_MappingManager.cpp b/tket/tests/test_MappingManager.cpp index c28bf04278..6e9c03a4e9 100644 --- a/tket/tests/test_MappingManager.cpp +++ b/tket/tests/test_MappingManager.cpp @@ -1,3 +1,17 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include #include #include diff --git a/tket/tests/test_MappingVerification.cpp b/tket/tests/test_MappingVerification.cpp new file mode 100644 index 0000000000..ce9be2d3c1 --- /dev/null +++ b/tket/tests/test_MappingVerification.cpp @@ -0,0 +1,113 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "Mapping/MappingManager.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/Verification.hpp" +#include "Placement/Placement.hpp" +#include "testutil.hpp" + +namespace tket{ +SCENARIO( + "Test validity of circuit against architecture using " + "respects_connectivity_constraints method.", + "[routing]") { + Architecture arc({{1, 0}, {1, 2}}); + + GIVEN("A simple CX circuit and a line_placement map.") { + Circuit circ(5); + add_2qb_gates(circ, OpType::CX, {{0, 1}, {0, 3}, {2, 4}, {1, 4}, {0, 4}}); + Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); + LinePlacement lp_obj(test_arc); + lp_obj.place(circ); + MappingManager mm(std::make_shared(test_arc)); + REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + CHECK(respects_connectivity_constraints(circ, test_arc, false)); + } + GIVEN("A failing case, undirected") { + Circuit circ(3); + circ.add_op(OpType::CX, {0, 2}); + reassign_boundary(circ); + REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A working case, undirected") { + Circuit circ(3); + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A failing case, directed") { + Circuit circ(3); + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A working case, directed") { + Circuit circ(3); + circ.add_op(OpType::CX, {1, 0}); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A failing case, undirected, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A working case, undirected, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 2}); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A failing case, directed, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {1, 0}); + reassign_boundary(circ); + REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A working case, directed, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + 
REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 1409ce23e3..a51d569794 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -1,3 +1,16 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include #include "Mapping/LexiRoute.hpp" diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp new file mode 100644 index 0000000000..5065916e42 --- /dev/null +++ b/tket/tests/test_RoutingPasses.cpp @@ -0,0 +1,358 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "Characterisation/DeviceCharacterisation.hpp" +#include "Circuit/Circuit.hpp" +#include "OpType/OpType.hpp" +#include "Predicates/CompilerPass.hpp" +#include "Predicates/PassGenerators.hpp" +#include "Predicates/Predicates.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/Verification.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" +#include "Transformations/BasicOptimisation.hpp" +#include "Transformations/Decomposition.hpp" +#include "Transformations/OptimisationPass.hpp" +#include "Transformations/Rebase.hpp" +#include "Transformations/Transform.hpp" +#include "Utils/HelperFunctions.hpp" +#include "testutil.hpp" + +namespace tket { + +using Connection = Architecture::Connection; + +SCENARIO("Test decompose_SWAP_to_CX pass", "[routing]") { + Architecture arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 0}}); + GIVEN("A single SWAP gate. 
Finding if correct number of vertices added") { + Circuit circ(5); + circ.add_op(OpType::SWAP, {0, 1}); + int original_vertices = circ.n_vertices(); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX().apply(circ); + int decompose_vertices = circ.n_vertices(); + REQUIRE(decompose_vertices - original_vertices == 2); + REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A single SWAP gate, finding if correct path is preserved.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {0, 1}); + // check output boundary + Vertex boundary_0 = circ.get_out(Qubit(0)); + Vertex boundary_1 = circ.get_out(Qubit(1)); + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(circ.get_out(Qubit(0)) == boundary_0); + REQUIRE(circ.get_out(Qubit(1)) == boundary_1); + // check output boundary is the same + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {0, 1}); + circ.add_op(OpType::CX, {0, 1}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite case.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {0, 1}); + circ.add_op(OpType::CX, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite SWAP.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + circ.add_op(OpType::CX, {0, 1}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite case, opposite SWAP.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + circ.add_op(OpType::CX, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite SWAP, pre CX.") { + Circuit circ(2); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::SWAP, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[1].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite case, opposite SWAP, pre CX.") { + Circuit circ(2); + circ.add_op(OpType::CX, {1, 0}); + circ.add_op(OpType::SWAP, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[1].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite case, opposite SWAP, pre CX, directed bool " + "on.") { + Circuit circ(2); + circ.add_op(OpType::CX, {1, 0}); + circ.add_op(OpType::SWAP, {1, 0}); + reassign_boundary(circ); + 
Transforms::decompose_SWAP_to_CX(arc).apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[1].get_args() == cor); + } + GIVEN("A circuit that with no CX gates, but with directed architecture.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[0].get_args() == cor); + } + GIVEN( + "A circuit that with no CX gates, but with directed architecture, " + "opposite case.") { + Architecture dummy_arc({{1, 0}}); + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(dummy_arc).apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[0].get_args() == cor); + } + // TEST CIRCUIT + Circuit circ(10); + int count = 0; + for (unsigned x = 0; x < 10; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + count += 2; + if (x % 2) { + add_2qb_gates(circ, OpType::SWAP, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::SWAP, {{y, x}, {y, y + 1}}); + } + } + } + + GIVEN("A network of SWAP gates.") { + int original_vertices = circ.n_vertices(); + std::vector original_boundary; + for (unsigned i = 0; i < circ.n_qubits(); i++) { + original_boundary.push_back(circ.get_out(Qubit(i))); + } + Transforms::decompose_SWAP_to_CX().apply(circ); + int decompose_vertices = circ.n_vertices(); + for (unsigned i = 0; i < circ.n_qubits(); i++) { + REQUIRE(original_boundary[i] == circ.get_out(Qubit(i))); + } + REQUIRE(decompose_vertices - original_vertices == 2 * count); + } + GIVEN("A routed network of SWAP gates.") { + SquareGrid grid(2, 5); + MappingManager mm(std::make_shared(grid)); + REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, grid, false, true)); + GIVEN("Directed CX gates") { + Transforms::decompose_SWAP_to_CX().apply(circ); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + Transforms::decompose_CX_directed(grid).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, grid, true)); + } + } +} + +SCENARIO("Test redirect_CX_gates pass", "[routing]") { + Architecture arc({{1, 0}, {1, 2}}); + GIVEN("A circuit that requires no redirection.") { + Circuit circ(3); + add_2qb_gates(circ, OpType::CX, {{1, 0}, {1, 2}}); + reassign_boundary(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A circuit that requires redirection.") { + Circuit circ(3); + add_2qb_gates(circ, OpType::CX, {{0, 1}, {2, 1}}); + reassign_boundary(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A circuit that requires no redirection, with SWAP.") { + Circuit circ(3); + + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 1}); + + swap_v = circ.add_op(OpType::SWAP, {0, 2}); + swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {2, 1}); + reassign_boundary(circ); + 
Transforms::decompose_SWAP_to_CX(arc).apply(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A circuit that requires redirection, with SWAP.") { + Circuit circ(3); + + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {1, 0}); + + swap_v = circ.add_op(OpType::SWAP, {0, 2}); + swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {1, 2}); + + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A complicated circuit of CX gates, routed.") { + Circuit circ(12); + SquareGrid grid(3, 4); + + for (unsigned x = 0; x < 12; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + MappingManager mm(std::make_shared(grid)); + REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + Transforms::decompose_CX_directed(grid).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, grid, true)); + } +} + + + +SCENARIO("Routing preserves the number of qubits") { + std::vector> cons; + cons.push_back({Node("x", 1), Node("x", 0)}); + cons.push_back({Node("x", 2), Node("x", 1)}); + Architecture arc( + std::vector>(cons.begin(), cons.end())); + PassPtr pass = gen_default_mapping_pass(arc, false); + Circuit c(3); + c.add_op(OpType::CnX, {2, 1}); + CompilationUnit cu(c); + bool applied = pass->apply(cu); + const Circuit &c1 = cu.get_circ_ref(); + REQUIRE(c.n_qubits() == c1.n_qubits()); +} + +SCENARIO("Default mapping pass delays measurements") { + std::vector> cons; + cons.push_back({Node("x", 0), Node("x", 2)}); + cons.push_back({Node("x", 1), Node("x", 2)}); + cons.push_back({Node("x", 2), Node("x", 3)}); + cons.push_back({Node("x", 3), Node("x", 0)}); + Architecture arc( + std::vector>(cons.begin(), cons.end())); + PassPtr pass = gen_default_mapping_pass(arc, false); + Circuit c(4, 4); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {1, 2}); + c.add_op(OpType::CX, {2, 3}); + c.add_op(OpType::CX, {3, 0}); + for (unsigned nn = 0; nn <= 3; ++nn) { + c.add_measure(nn, nn); + } + Circuit c2(c); + CompilationUnit cu(c); + REQUIRE(pass->apply(cu)); + CompilationUnit cu2(c2); + // delay_measures is default to true + PassPtr pass2 = gen_default_mapping_pass(arc); + REQUIRE(pass2->apply(cu2)); + PredicatePtr mid_meas_pred = std::make_shared(); + REQUIRE(!mid_meas_pred->verify(cu.get_circ_ref())); + REQUIRE(mid_meas_pred->verify(cu2.get_circ_ref())); +} +SCENARIO( + "Does copying decompose_SWAP_to_CX pass and applying it to a routed " + "Circuit work correctly?") { + GIVEN("A simple circuit and architecture.") { + Circuit circ(5); + add_2qb_gates( + circ, OpType::CX, + {{0, 3}, + {1, 4}, + {0, 1}, + {2, 0}, + {2, 1}, + {1, 0}, + {0, 4}, + {2, 1}, + {0, 3}}); + Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + + Transform T_1 = 
Transforms::decompose_SWAP_to_CX(); + T_1.apply(circ); + REQUIRE(circ.count_gates(OpType::SWAP) == 0); + } +} +} // namespace tket diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 6339ef1224..7df25c419d 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -92,13 +92,16 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_PhasePolynomials.cpp ${TKET_TESTS_DIR}/test_PauliGraph.cpp ${TKET_TESTS_DIR}/test_Architectures.cpp + ${TKET_TESTS_DIR}/test_ArchitectureAwareSynthesis.cpp ${TKET_TESTS_DIR}/test_Placement.cpp + ${TKET_TESTS_DIR}/test_MappingVerification.cpp ${TKET_TESTS_DIR}/test_MappingFrontier.cpp ${TKET_TESTS_DIR}/test_RoutingMethod.cpp ${TKET_TESTS_DIR}/test_MappingManager.cpp ${TKET_TESTS_DIR}/test_LexicographicalComparison.cpp ${TKET_TESTS_DIR}/test_LexiRoute.cpp ${TKET_TESTS_DIR}/test_MultiGateReorder.cpp + ${TKET_TESTS_DIR}/test_RoutingPasses.cpp ${TKET_TESTS_DIR}/test_DeviceCharacterisation.cpp ${TKET_TESTS_DIR}/test_Clifford.cpp ${TKET_TESTS_DIR}/test_MeasurementSetup.cpp From 09fcd8af5f594f8d0574dd81d0c4f540a5c42f6f Mon Sep 17 00:00:00 2001 From: cqc-melf <70640934+cqc-melf@users.noreply.github.com> Date: Wed, 16 Feb 2022 17:49:05 +0100 Subject: [PATCH 102/146] add ci run on push on - feature/RV3.1 --- .github/workflows/build_and_test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index e4611cde98..203c8913ef 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -9,6 +9,7 @@ on: push: branches: - develop + - feature/RV3.1 schedule: # 03:00 every Saturday morning - cron: '0 3 * * 6' From 5a3f190ed2ba34da4a03c949e219f96b33f1cf5d Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 17:20:51 +0000 Subject: [PATCH 103/146] fix compilation issues --- tket/src/Architecture/Architecture.cpp | 9 +- tket/src/Architecture/CMakeLists.txt | 1 + .../include/Architecture/Architecture.hpp | 1 - tket/src/Mapping/MappingFrontier.cpp | 3 +- tket/src/Mapping/MultiGateReorder.cpp | 3 +- tket/src/Predicates/PassGenerators.cpp | 2 +- tket/src/TokenSwappingWithArch/CMakeLists.txt | 2 + .../tests/test_ArchitectureAwareSynthesis.cpp | 6 +- tket/tests/test_LexiRoute.cpp | 340 +++--------------- tket/tests/test_MappingVerification.cpp | 9 +- tket/tests/test_RoutingPasses.cpp | 158 +++++++- 11 files changed, 222 insertions(+), 312 deletions(-) diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index 3376750792..db925c99b0 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -18,6 +18,7 @@ #include #include +#include "Circuit/Conditional.hpp" #include "Graphs/ArticulationPoints.hpp" #include "Utils/Json.hpp" #include "Utils/UnitID.hpp" @@ -27,14 +28,12 @@ namespace tket { // basic implementation that works off same prior assumptions // TODO: Update this for more mature systems of multi-qubit gates bool Architecture::valid_operation( - - const Op_ptr& op, const std::vector& uids) const { if (op->get_desc().is_box() || (op->get_type() == OpType::Conditional && - static_cast(*op).get_op()->get_desc().is_box())) + static_cast(*op).get_op()->get_desc().is_box())) { return false; - } + } if (uids.size() == 1) { // with current Architecture can assume all single qubit gates valid return true; @@ -45,7 +44,7 @@ bool Architecture::valid_operation( this->bidirectional_edge_exists(uids[0], uids[1])) { return true; } - } else if (uids.size() 
== 3 && optype == OpType::BRIDGE) { + } else if (uids.size() == 3 && op->get_type() == OpType::BRIDGE) { bool con_0_exists = this->bidirectional_edge_exists(uids[0], uids[1]); bool con_1_exists = this->bidirectional_edge_exists(uids[2], uids[1]); if (this->node_exists(uids[0]) && this->node_exists(uids[1]) && diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index eabef22756..4213458bdd 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -30,6 +30,7 @@ list(APPEND DEPS_${COMP} OpType Utils) + foreach(DEP ${DEPS_${COMP}}) target_include_directories( tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index a7707bcdd8..2760b47c08 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -21,7 +21,6 @@ #include #include -#include "Circuit/Conditional.hpp" #include "Graphs/CompleteGraph.hpp" #include "Graphs/DirectedGraph.hpp" #include "Ops/OpPtr.hpp" diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 6280efd55f..9e9fb9f3ea 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -242,7 +242,8 @@ void MappingFrontier::advance_frontier_boundary( for (const UnitID& uid : uids) { nodes.push_back(Node(uid)); } - if (architecture->valid_operation(this->circuit_.get_Op_ptr_from_Vertex(vert), nodes)) { + if (architecture->valid_operation( + this->circuit_.get_Op_ptr_from_Vertex(vert), nodes)) { // if no valid operation, boundary not updated and while loop terminates boundary_updated = true; for (const UnitID& uid : uids) { diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 99ea0a0565..3e0cea53ff 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -67,7 +67,8 @@ static bool is_physically_permitted( for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); } - return arc_ptr->valid_operation(frontier->circuit_.get_Op_ptr_from_Vertex(vert), nodes); + return arc_ptr->valid_operation( + frontier->circuit_.get_Op_ptr_from_Vertex(vert), nodes); } // This method will try to commute a vertex to the quantum frontier diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index e3e2dcb2f2..d37eb925c4 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -196,7 +196,7 @@ PassPtr gen_default_mapping_pass(const Architecture& arc, bool delay_measures) { PassPtr return_pass = gen_full_mapping_pass( arc, std::make_shared(arc), {std::make_shared(), - std::make_shared(100)}) + std::make_shared(100)}); if (delay_measures) { return_pass = return_pass >> DelayMeasures(); } diff --git a/tket/src/TokenSwappingWithArch/CMakeLists.txt b/tket/src/TokenSwappingWithArch/CMakeLists.txt index b975711762..96f84066be 100644 --- a/tket/src/TokenSwappingWithArch/CMakeLists.txt +++ b/tket/src/TokenSwappingWithArch/CMakeLists.txt @@ -26,8 +26,10 @@ add_library(tket-${COMP} ) list(APPEND DEPS_${COMP} + Circuit Architecture Graphs + Ops OpType TokenSwapping Utils) diff --git a/tket/tests/test_ArchitectureAwareSynthesis.cpp b/tket/tests/test_ArchitectureAwareSynthesis.cpp index 6737af45c3..e6ca39ece5 
100644 --- a/tket/tests/test_ArchitectureAwareSynthesis.cpp +++ b/tket/tests/test_ArchitectureAwareSynthesis.cpp @@ -17,11 +17,11 @@ #include "Architecture/Architecture.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" -#include "testutil.hpp" #include "Simulation/CircuitSimulator.hpp" #include "Simulation/ComparisonFunctions.hpp" +#include "testutil.hpp" -namespace tket{ +namespace tket { using Connection = Architecture::Connection; SCENARIO("Routing of aas example") { GIVEN("aas routing - simple example") { @@ -369,4 +369,4 @@ SCENARIO("Routing of aas example") { REQUIRE(test_unitary_comparison(circ, result)); } } -} //namespace tket \ No newline at end of file +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index e59600958d..de0b430969 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -14,20 +14,18 @@ #include -#include "Mapping/LexiLabelling.hpp" #include "Mapping/LexiRoute.hpp" +#include "Mapping/LexiLabelling.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/Verification.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/PassLibrary.hpp" -// #include "Transformations/Transform.hpp" #include "Transformations/Decomposition.hpp" #include "testutil.hpp" namespace tket { - -SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { +SCENARIO("Test LexiRoute::solve") { std::vector nodes = {Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), @@ -94,8 +92,8 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::shared_ptr mf0 = std::make_shared(circ); LexiRoute lr(shared_arc, mf0); - lr.solve_labelling(); - // lr.solve(4); + + lr.solve(4); REQUIRE(mf0->circuit_.n_gates() == 3); @@ -105,9 +103,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::shared_ptr mf1 = std::make_shared(circ); LexiRoute lr1(shared_arc, mf1); - // lr1.solve_labelling(); lr1.solve(4); - std::vector commands = mf1->circuit_.get_commands(); Command swap_c = commands[1]; unit_vector_t uids = {nodes[1], nodes[2]}; @@ -159,8 +155,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr0(shared_arc, mf); - lr0.solve_labelling(); - // lr0.solve(20); + lr0.solve(20); std::vector commands = mf->circuit_.get_commands(); REQUIRE(commands.size() == 4); Command c = commands[0]; @@ -169,19 +164,19 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); - lr1.solve_labelling(); + lr1.solve(20); uids = {nodes[2], nodes[3]}; REQUIRE(mf->circuit_.get_commands()[1].get_args() == uids); mf->advance_frontier_boundary(shared_arc); LexiRoute lr2(shared_arc, mf); - lr2.solve_labelling(); + lr2.solve(20); uids = {nodes[2], nodes[5]}; REQUIRE(mf->circuit_.get_commands()[2].get_args() == uids); mf->advance_frontier_boundary(shared_arc); LexiRoute lr3(shared_arc, mf); - lr3.solve_labelling(); + lr3.solve(20); uids = {nodes[5], nodes[6]}; REQUIRE(mf->circuit_.get_commands()[3].get_args() == uids); } @@ -305,7 +300,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); - lr1.solve_labelling(); + lr1.solve(20); 
REQUIRE(circ.all_qubits()[0] == nodes[3]); } @@ -341,7 +336,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr0(shared_arc, mf); - lr0.solve_labelling(); + lr0.solve(20); mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); @@ -377,9 +372,8 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::make_shared(circ); mf->ancilla_nodes_.insert(nodes[3]); mf->advance_frontier_boundary(shared_arc); - LexiRoute lr0(shared_arc, mf); - lr0.solve_labelling(); + lr0.solve(20); REQUIRE(circ.all_qubits()[1] == nodes[4]); REQUIRE(circ.all_qubits()[0] == nodes[3]); @@ -417,8 +411,8 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for" - "one updated label, order 0.") { + "Labelling is required, but there are no free remaining qubits, for one " + "updated label, order 0.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -435,11 +429,11 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for " - " one updated label, order 1.") { + "Labelling is required, but there are no free remaining qubits, for one " + "updated label, order 1.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -456,11 +450,11 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for" - "two updated labels.") { + "Labelling is required, but there are no free remaining qubits, for two " + "updated labels.") { Circuit circ(10); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[9], qubits[8]}); @@ -477,244 +471,10 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); } } -SCENARIO("Test LexiLabellingMethod") { - std::vector nodes = { - Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), - Node("node_test", 3), Node("node_test", 4)}; - - // straight line - Architecture architecture( - {{nodes[0], nodes[1]}, - {nodes[1], nodes[2]}, - {nodes[2], nodes[3]}, - {nodes[3], nodes[4]}}); - ArchitecturePtr shared_arc = std::make_shared(architecture); - GIVEN("No qubit to label, empty frontier, check_method.") { - Circuit circ(5); - std::shared_ptr mf = - std::make_shared(circ); - LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); - } - GIVEN("No qubit to label, partially filled frontier, check_method.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[0], qubits[4]}); - circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); - circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); - std::map 
rename_map = { - {qubits[0], nodes[0]}, - {qubits[1], nodes[1]}, - {qubits[2], nodes[2]}, - {qubits[3], nodes[3]}, - {qubits[4], nodes[4]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); - } - GIVEN("Qubit to label, but casually restricted, check_method.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[0], qubits[4]}); - circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); - circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); - std::map rename_map = { - {qubits[0], nodes[0]}, - {qubits[1], nodes[1]}, - {qubits[2], nodes[2]}, - {qubits[4], nodes[4]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); - } - GIVEN( - "Two Qubit to label in future slice, causally restricted, " - "check_method.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[0], qubits[1]}); - circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); - circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); - circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[4]}); - std::map rename_map = { - {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); - } - GIVEN("Three Qubit Gate, all labelled, first slice, check_method.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[0], qubits[4]}); - circ.add_op(OpType::CCX, {qubits[1], qubits[2], qubits[3]}); - std::map rename_map = { - {qubits[0], nodes[0]}, - {qubits[1], nodes[1]}, - {qubits[2], nodes[2]}, - {qubits[3], nodes[3]}, - {qubits[4], nodes[4]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); - } - GIVEN("One unlabelled qubit, one slice, check and route.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[0], qubits[1]}); - circ.add_op(OpType::CX, {qubits[2], qubits[3]}); - std::map rename_map = { - {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - VertPort pre_label = - mf->quantum_boundary->get().find(qubits[3])->second; - LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); - REQUIRE( - mf->quantum_boundary->get().find(qubits[3]) == - mf->quantum_boundary->get().end()); - VertPort post_label = - mf->quantum_boundary->get().find(nodes[3])->second; - REQUIRE(pre_label == post_label); - } - GIVEN( - "One unlabelled qubit, two slices, lookahead for better solution, check" - " and route.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[0], qubits[1]}); - circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); - circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); - - std::map rename_map = { - {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[3], nodes[3]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - VertPort pre_label = - mf->quantum_boundary->get().find(qubits[2])->second; - LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, 
shared_arc); - REQUIRE( - mf->quantum_boundary->get().find(qubits[2]) == - mf->quantum_boundary->get().end()); - VertPort post_label = - mf->quantum_boundary->get().find(nodes[2])->second; - REQUIRE(pre_label == post_label); - } - GIVEN("Two unlabelled qubits, one slice, check and route.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[0], qubits[1]}); - circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); - - std::map rename_map = { - {qubits[2], nodes[2]}, {qubits[1], nodes[1]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - VertPort pre_label_0 = - mf->quantum_boundary->get().find(qubits[0])->second; - VertPort pre_label_3 = - mf->quantum_boundary->get().find(qubits[3])->second; - LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); - REQUIRE( - mf->quantum_boundary->get().find(qubits[0]) == - mf->quantum_boundary->get().end()); - REQUIRE( - mf->quantum_boundary->get().find(qubits[3]) == - mf->quantum_boundary->get().end()); - VertPort post_label_0 = - mf->quantum_boundary->get().find(nodes[0])->second; - REQUIRE(pre_label_0 == post_label_0); - VertPort post_label_3 = - mf->quantum_boundary->get().find(nodes[3])->second; - REQUIRE(pre_label_3 == post_label_3); - } - GIVEN("Two unlabelled qubits, two slices, lookahead, check and route.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[2], qubits[1]}); - circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); - circ.add_op(OpType::CX, {qubits[2], qubits[4]}); - - std::map rename_map = { - {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - VertPort pre_label_0 = - mf->quantum_boundary->get().find(qubits[2])->second; - VertPort pre_label_3 = - mf->quantum_boundary->get().find(qubits[3])->second; - LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); - REQUIRE( - mf->quantum_boundary->get().find(qubits[2]) == - mf->quantum_boundary->get().end()); - REQUIRE( - mf->quantum_boundary->get().find(qubits[3]) == - mf->quantum_boundary->get().end()); - VertPort post_label_0 = - mf->quantum_boundary->get().find(nodes[0])->second; - REQUIRE(pre_label_0 == post_label_0); - VertPort post_label_3 = - mf->quantum_boundary->get().find(nodes[3])->second; - REQUIRE(pre_label_3 == post_label_3); - } - GIVEN( - "Two unlabelled qubits, two slices, lookahead unrouted, check and " - "route.") { - Circuit circ(5); - std::vector qubits = circ.all_qubits(); - circ.add_op(OpType::CX, {qubits[2], qubits[1]}); - circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); - circ.add_op(OpType::CX, {qubits[2], qubits[0]}); - - std::map rename_map = { - {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; - circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); - VertPort pre_label_0 = - mf->quantum_boundary->get().find(qubits[2])->second; - VertPort pre_label_3 = - mf->quantum_boundary->get().find(qubits[3])->second; - LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); - REQUIRE( - mf->quantum_boundary->get().find(qubits[2]) == - mf->quantum_boundary->get().end()); - REQUIRE( - mf->quantum_boundary->get().find(qubits[3]) == - mf->quantum_boundary->get().end()); - VertPort post_label_0 = - mf->quantum_boundary->get().find(nodes[0])->second; - REQUIRE(pre_label_0 == 
post_label_0); - VertPort post_label_3 = - mf->quantum_boundary->get().find(nodes[3])->second; - REQUIRE(pre_label_3 == post_label_3); - } -} SCENARIO("Test LexiRouteRoutingMethod") { std::vector nodes = { Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), @@ -817,7 +577,7 @@ SCENARIO("Test LexiRouteRoutingMethod") { REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } } -SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { +SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { GIVEN("11 Node Architecture, 11 Qubit circuit, multiple SWAP required.") { std::vector nodes = { Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), @@ -846,7 +606,7 @@ SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { ArchitecturePtr shared_arc = std::make_shared(architecture); Circuit circ(11); std::vector qubits = circ.all_qubits(); - for (unsigned i = 0; i < 11; i++) { + for (unsigned i = 0; i < 10; i++) { circ.add_op(OpType::CX, {qubits[0], qubits[4]}); circ.add_op(OpType::CX, {qubits[6], qubits[7]}); circ.add_op(OpType::CX, {qubits[1], qubits[10]}); @@ -868,11 +628,8 @@ SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { std::shared_ptr mf = std::make_shared(copy_circ); - LexiLabellingMethod lrm; std::vector vrm = { - std::make_shared(lrm), std::make_shared(100)}; - REQUIRE(vrm[0]->check_method(mf, shared_arc)); bool res = mm.route_circuit(circ, vrm); @@ -900,9 +657,7 @@ SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(sg, false); MappingManager mm(shared_arc); - LexiLabellingMethod lrm; std::vector vrm = { - std::make_shared(lrm), std::make_shared(100)}; bool res = mm.route_circuit(circ, vrm); @@ -928,7 +683,8 @@ SCENARIO( RingArch arc(4); MappingManager mm(std::make_shared(arc)); REQUIRE(mm.route_circuit( - test_circuit, {std::make_shared()})); + test_circuit, {std::make_shared(), + std::make_shared()})); REQUIRE(test_circuit.n_gates() == 4); } GIVEN("A solved problem supplied with map and custom architecture") { @@ -948,7 +704,8 @@ SCENARIO( MappingManager mm(std::make_shared(test_arc)); REQUIRE(!mm.route_circuit( - test_circuit, {std::make_shared()})); + test_circuit, {std::make_shared(), + std::make_shared()})); qubit_vector_t all_qs_post_solve = test_circuit.all_qubits(); REQUIRE(all_qs_post_place == all_qs_post_solve); @@ -962,8 +719,9 @@ SCENARIO("Empty Circuit test") { circ.add_blank_wires(4); Architecture arc({{0, 1}, {1, 2}, {2, 3}}); MappingManager mm(std::make_shared(arc)); - REQUIRE( - !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(!mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); REQUIRE(circ.n_gates() == 0); } } @@ -979,8 +737,9 @@ SCENARIO("Routing on circuit with no multi-qubit gates") { unsigned orig_vertices = circ.n_vertices(); Architecture arc({{0, 1}, {1, 2}, {2, 3}}); MappingManager mm(std::make_shared(arc)); - REQUIRE( - !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(!mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); REQUIRE(orig_vertices - 8 == circ.n_gates()); } } @@ -995,8 +754,9 @@ SCENARIO("Test routing on a directed architecture with bidirectional edges") { // routing ignored bi directional edge and solves correctly MappingManager mm(std::make_shared(arc)); - REQUIRE( - mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + 
std::make_shared()})); REQUIRE(circ.n_gates() == 2); CHECK(respects_connectivity_constraints(circ, arc, false)); } @@ -1020,8 +780,9 @@ SCENARIO( Architecture arc(std::vector>{{0, 1}}); MappingManager mm(std::make_shared(arc)); - REQUIRE( - mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); REQUIRE(circ.n_gates() == 8); } } @@ -1048,8 +809,9 @@ SCENARIO("Dense CX circuits route succesfully") { {11, 12}, {12, 16}, {12, 17}, {12, 13}, {13, 18}, {13, 19}, {13, 14}, {14, 18}, {14, 19}, {15, 16}, {16, 17}, {17, 18}, {18, 19}}); MappingManager mm(std::make_shared(arc)); - REQUIRE( - mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); (Transforms::decompose_SWAP_to_CX() >> Transforms::decompose_BRIDGE_to_CX()) .apply(circ); @@ -1074,8 +836,9 @@ SCENARIO( } RingArch arc(29); MappingManager mm(std::make_shared(arc)); - REQUIRE( - mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); Transforms::decompose_SWAP_to_CX().apply(circ); REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); } @@ -1112,8 +875,9 @@ SCENARIO( {12, 13}, {6, 7}}); MappingManager mm(std::make_shared(arc)); - REQUIRE( - mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); } } @@ -1123,8 +887,9 @@ SCENARIO("Empty circuits, with and without blank wires") { Circuit circ(6); RingArch arc(6); MappingManager mm(std::make_shared(arc)); - REQUIRE( - !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(!mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); REQUIRE(circ.depth() == 0); REQUIRE(circ.n_gates() == 0); REQUIRE(circ.n_qubits() == 6); @@ -1134,8 +899,9 @@ SCENARIO("Empty circuits, with and without blank wires") { Circuit circ(0); RingArch arc(6); MappingManager mm(std::make_shared(arc)); - REQUIRE( - !mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(!mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); REQUIRE(circ.depth() == 0); REQUIRE(circ.n_gates() == 0); REQUIRE(circ.n_qubits() == 0); diff --git a/tket/tests/test_MappingVerification.cpp b/tket/tests/test_MappingVerification.cpp index ce9be2d3c1..915ef5c7df 100644 --- a/tket/tests/test_MappingVerification.cpp +++ b/tket/tests/test_MappingVerification.cpp @@ -14,13 +14,13 @@ #include -#include "Mapping/MappingManager.hpp" #include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" #include "Mapping/Verification.hpp" #include "Placement/Placement.hpp" #include "testutil.hpp" -namespace tket{ +namespace tket { SCENARIO( "Test validity of circuit against architecture using " "respects_connectivity_constraints method.", @@ -34,7 +34,8 @@ SCENARIO( LinePlacement lp_obj(test_arc); lp_obj.place(circ); MappingManager mm(std::make_shared(test_arc)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); CHECK(respects_connectivity_constraints(circ, test_arc, false)); } GIVEN("A failing case, undirected") { @@ -110,4 +111,4 @@ SCENARIO( REQUIRE(respects_connectivity_constraints(circ, arc, false)); } } -} // namespace tket \ No newline at end of file +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp index 
5065916e42..e732e4d91e 100644 --- a/tket/tests/test_RoutingPasses.cpp +++ b/tket/tests/test_RoutingPasses.cpp @@ -18,13 +18,14 @@ #include "Characterisation/DeviceCharacterisation.hpp" #include "Circuit/Circuit.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/Verification.hpp" #include "OpType/OpType.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/Predicates.hpp" -#include "Mapping/MappingManager.hpp" -#include "Mapping/LexiRoute.hpp" -#include "Mapping/Verification.hpp" #include "Simulation/CircuitSimulator.hpp" #include "Simulation/ComparisonFunctions.hpp" #include "Transformations/BasicOptimisation.hpp" @@ -192,7 +193,9 @@ SCENARIO("Test decompose_SWAP_to_CX pass", "[routing]") { GIVEN("A routed network of SWAP gates.") { SquareGrid grid(2, 5); MappingManager mm(std::make_shared(grid)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); Transforms::decompose_SWAP_to_CX().apply(circ); REQUIRE(respects_connectivity_constraints(circ, grid, false, true)); GIVEN("Directed CX gates") { @@ -277,7 +280,9 @@ SCENARIO("Test redirect_CX_gates pass", "[routing]") { } } MappingManager mm(std::make_shared(grid)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); Transforms::decompose_BRIDGE_to_CX().apply(circ); Transforms::decompose_SWAP_to_CX(arc).apply(circ); Transforms::decompose_CX_directed(grid).apply(circ); @@ -285,8 +290,6 @@ SCENARIO("Test redirect_CX_gates pass", "[routing]") { } } - - SCENARIO("Routing preserves the number of qubits") { std::vector> cons; cons.push_back({Node("x", 1), Node("x", 0)}); @@ -330,6 +333,141 @@ SCENARIO("Default mapping pass delays measurements") { REQUIRE(!mid_meas_pred->verify(cu.get_circ_ref())); REQUIRE(mid_meas_pred->verify(cu2.get_circ_ref())); } + +SCENARIO( + "Methods related to correct routing and decomposition of circuits with " + "classical wires.") { + GIVEN("A circuit with classical wires on CX gates.") { + Architecture test_arc({{0, 1}, {1, 2}}); + Circuit circ(3, 2); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0, 1}, 0); + circ.add_conditional_gate(OpType::CX, {}, {2, 1}, {0, 1}, 1); + circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0, 1}, 2); + circ.add_conditional_gate(OpType::CX, {}, {2, 1}, {1, 0}, 3); + circ.add_conditional_gate(OpType::CX, {}, {0, 2}, {0, 1}, 0); + MappingManager mm(std::make_shared(test_arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, test_arc, false, false)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, test_arc, false, false)); + } + GIVEN( + "A circuit that requires modification to satisfy architecture " + "constraints.") { + Architecture sg({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); + Circuit circ(5, 1); + circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0}, 1); + add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}, {1, 3}, {1, 4}, {0, 1}}); + + MappingManager mm(std::make_shared(sg)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + + Transforms::decompose_SWAP_to_CX().apply(circ); + 
REQUIRE(respects_connectivity_constraints(circ, sg, false, false)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, sg, false, false)); + Command classical_com = circ.get_commands()[0]; + REQUIRE(classical_com.get_args()[0] == circ.all_bits()[0]); + } + GIVEN("A single Bridge gate with multiple classical wires, decomposed.") { + Architecture arc({{0, 1}, {1, 2}}); + Circuit circ(3, 3); + circ.add_conditional_gate( + OpType::BRIDGE, {}, {0, 1, 2}, {0, 1, 2}, 1); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + for (Command com : circ.get_commands()) { + REQUIRE(com.get_args()[0] == circ.all_bits()[0]); + REQUIRE(com.get_args()[1] == circ.all_bits()[1]); + REQUIRE(com.get_args()[2] == circ.all_bits()[2]); + } + } + GIVEN("A directed architecture, a single CX gate that requires flipping.") { + Architecture arc(std::vector>{{0, 1}}); + Circuit circ(2, 2); + circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {1, 0}, 0); + circ.add_conditional_gate(OpType::CX, {}, {1, 0}, {0, 1}, 1); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, false)); + REQUIRE(!respects_connectivity_constraints(circ, arc, true, false)); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true, false)); + std::vector all_coms = circ.get_commands(); + REQUIRE(all_coms[0].get_args()[0] == circ.all_bits()[1]); + REQUIRE(all_coms[0].get_args()[1] == circ.all_bits()[0]); + REQUIRE(all_coms[1].get_args()[0] == circ.all_bits()[0]); + REQUIRE(all_coms[1].get_args()[1] == circ.all_bits()[1]); + } + GIVEN( + "A large circuit, with a mixture of conditional CX and CZ with " + "multiple classical wires, non conditional CX and CZ, and single " + "qubit gates.") { + SquareGrid arc(5, 10); + Circuit circ(50, 10); + for (unsigned i = 0; i < 48; i++) { + circ.add_op(OpType::CX, {i, i + 1}); + circ.add_conditional_gate( + OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); + circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); + circ.add_conditional_gate( + OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); + circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); + circ.add_conditional_gate( + OpType::CZ, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); + circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); + } + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + } + GIVEN( + "A large circuit, with a mixture of conditional CX and CX gates with " + "multiple classical wires, non conditional CX and, single qubit " + "gates, and a directed architecture.") { + SquareGrid arc(10, 4, 2); + Circuit circ(60, 10); + for (unsigned i = 0; i < 58; i++) { + circ.add_op(OpType::CX, {i, i + 1}); + circ.add_conditional_gate( + OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); + circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); + circ.add_conditional_gate( + OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); + circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); + circ.add_conditional_gate( + OpType::CX, 
{}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); + circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); + } + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true, true)); + } +} + SCENARIO( "Does copying decompose_SWAP_to_CX pass and applying it to a routed " "Circuit work correctly?") { @@ -348,8 +486,10 @@ SCENARIO( {0, 3}}); Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); MappingManager mm(std::make_shared(arc)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); - + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + Transform T_1 = Transforms::decompose_SWAP_to_CX(); T_1.apply(circ); REQUIRE(circ.count_gates(OpType::SWAP) == 0); From c819859e0d22d5098ade9909713978225f897d15 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 17:32:18 +0000 Subject: [PATCH 104/146] remove token_swapping_method --- pytket/binders/mapping.cpp | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index da2fb95c66..3eea34538f 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -23,29 +23,13 @@ #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/RoutingMethodCircuit.hpp" -#include "TokenSwapping/main_entry_functions.hpp" #include "binder_utils.hpp" namespace py = pybind11; namespace tket { -std::vector> get_ts_swaps( - const Architecture& architecture, const NodeMapping& node_mapping) { - return get_swaps(architecture, node_mapping); -} - PYBIND11_MODULE(mapping, m) { - m.def( - "get_token_swapping_network", &get_ts_swaps, - "For a given architecture and map from Node to Node, returns a list of " - "tuple of Node corresponding to a sequence of SWAP gates that would map" - "a state from the first node to second node. \n\n:param architecture: " - "Architecture SWAP network respects. 
\n:param node_mapping: Node from " - "and to " - "some logical state must travel.", - py::arg("architecture"), py::arg("node_mapping")); - py::class_>( m, "RoutingMethod", "Parent class for RoutingMethod, for inheritance purposes only, not for " From d219fefbc6a49c440e8fb1daed62768371e18568 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 17:53:07 +0000 Subject: [PATCH 105/146] Update test_lexiroute --- pytket/binders/mapping.cpp | 2 +- tket/src/Predicates/PassGenerators.cpp | 2 +- tket/tests/test_LexiRoute.cpp | 289 +++++++++++++++++++++++-- tket/tests/test_RoutingPasses.cpp | 10 +- 4 files changed, 274 insertions(+), 29 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index 3eea34538f..d87e00c9bb 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -18,8 +18,8 @@ #include #include -#include "Mapping/LexiLabelling.hpp" #include "Circuit/Circuit.hpp" +#include "Mapping/LexiLabelling.hpp" #include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/RoutingMethodCircuit.hpp" diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index 36b6561920..564692ea11 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -233,7 +233,7 @@ PassPtr gen_routing_pass( PredicatePtr n_qubit_pred = std::make_shared(arc.n_nodes()); PredicatePtrMap precons{ - // CompilationUnit::make_type_pair(placedpred), + CompilationUnit::make_type_pair(placedpred), CompilationUnit::make_type_pair(twoqbpred), CompilationUnit::make_type_pair(n_qubit_pred)}; diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index de0b430969..b188fac27d 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -14,8 +14,8 @@ #include -#include "Mapping/LexiRoute.hpp" #include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/Verification.hpp" #include "Predicates/CompilationUnit.hpp" @@ -24,8 +24,10 @@ #include "Predicates/PassLibrary.hpp" #include "Transformations/Decomposition.hpp" #include "testutil.hpp" + namespace tket { -SCENARIO("Test LexiRoute::solve") { + +SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::vector nodes = {Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), @@ -92,8 +94,8 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf0 = std::make_shared(circ); LexiRoute lr(shared_arc, mf0); - - lr.solve(4); + lr.solve_labelling(); + // lr.solve(4); REQUIRE(mf0->circuit_.n_gates() == 3); @@ -103,7 +105,9 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf1 = std::make_shared(circ); LexiRoute lr1(shared_arc, mf1); + // lr1.solve_labelling(); lr1.solve(4); + std::vector commands = mf1->circuit_.get_commands(); Command swap_c = commands[1]; unit_vector_t uids = {nodes[1], nodes[2]}; @@ -155,7 +159,8 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr0(shared_arc, mf); - lr0.solve(20); + lr0.solve_labelling(); + // lr0.solve(20); std::vector commands = mf->circuit_.get_commands(); REQUIRE(commands.size() == 4); Command c = commands[0]; @@ -164,19 +169,19 @@ SCENARIO("Test LexiRoute::solve") { mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); - lr1.solve(20); + lr1.solve_labelling(); uids = {nodes[2], nodes[3]}; REQUIRE(mf->circuit_.get_commands()[1].get_args() == 
uids); mf->advance_frontier_boundary(shared_arc); LexiRoute lr2(shared_arc, mf); - lr2.solve(20); + lr2.solve_labelling(); uids = {nodes[2], nodes[5]}; REQUIRE(mf->circuit_.get_commands()[2].get_args() == uids); mf->advance_frontier_boundary(shared_arc); LexiRoute lr3(shared_arc, mf); - lr3.solve(20); + lr3.solve_labelling(); uids = {nodes[5], nodes[6]}; REQUIRE(mf->circuit_.get_commands()[3].get_args() == uids); } @@ -300,7 +305,7 @@ SCENARIO("Test LexiRoute::solve") { mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); - lr1.solve(20); + lr1.solve_labelling(); REQUIRE(circ.all_qubits()[0] == nodes[3]); } @@ -336,7 +341,7 @@ SCENARIO("Test LexiRoute::solve") { std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr0(shared_arc, mf); - lr0.solve(20); + lr0.solve_labelling(); mf->advance_frontier_boundary(shared_arc); LexiRoute lr1(shared_arc, mf); @@ -372,8 +377,9 @@ SCENARIO("Test LexiRoute::solve") { std::make_shared(circ); mf->ancilla_nodes_.insert(nodes[3]); mf->advance_frontier_boundary(shared_arc); + LexiRoute lr0(shared_arc, mf); - lr0.solve(20); + lr0.solve_labelling(); REQUIRE(circ.all_qubits()[1] == nodes[4]); REQUIRE(circ.all_qubits()[0] == nodes[3]); @@ -411,8 +417,8 @@ SCENARIO("Test LexiRoute::solve") { REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for one " - "updated label, order 0.") { + "Labelling is required, but there are no free remaining qubits, for" + "one updated label, order 0.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -429,11 +435,11 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for one " - "updated label, order 1.") { + "Labelling is required, but there are no free remaining qubits, for " + " one updated label, order 1.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -450,11 +456,11 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } GIVEN( - "Labelling is required, but there are no free remaining qubits, for two " - "updated labels.") { + "Labelling is required, but there are no free remaining qubits, for" + "two updated labels.") { Circuit circ(10); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[9], qubits[8]}); @@ -471,10 +477,244 @@ SCENARIO("Test LexiRoute::solve") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); - REQUIRE_THROWS_AS(lr.solve(1), LexiRouteError); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } } +SCENARIO("Test LexiLabellingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4)}; + + // straight line + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("No qubit to label, empty frontier, check_method.") { + Circuit circ(5); + std::shared_ptr mf = + 
std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("No qubit to label, partially filled frontier, check_method.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("Qubit to label, but casually restricted, check_method.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN( + "Two Qubit to label in future slice, causally restricted, " + "check_method.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[4]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("Three Qubit Gate, all labelled, first slice, check_method.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CCX, {qubits[1], qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.check_method(mf, shared_arc)); + } + GIVEN("One unlabelled qubit, one slice, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label = + mf->quantum_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label == post_label); + } + GIVEN( + "One unlabelled qubit, two slices, lookahead for better solution, check" + " and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); 
+ circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label = + mf->quantum_boundary->get().find(qubits[2])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[2]) == + mf->quantum_boundary->get().end()); + VertPort post_label = + mf->quantum_boundary->get().find(nodes[2])->second; + REQUIRE(pre_label == post_label); + } + GIVEN("Two unlabelled qubits, one slice, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); + + std::map rename_map = { + {qubits[2], nodes[2]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label_0 = + mf->quantum_boundary->get().find(qubits[0])->second; + VertPort pre_label_3 = + mf->quantum_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[0]) == + mf->quantum_boundary->get().end()); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label_0 = + mf->quantum_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } + GIVEN("Two unlabelled qubits, two slices, lookahead, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[2], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[4]}); + + std::map rename_map = { + {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label_0 = + mf->quantum_boundary->get().find(qubits[2])->second; + VertPort pre_label_3 = + mf->quantum_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[2]) == + mf->quantum_boundary->get().end()); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label_0 = + mf->quantum_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } + GIVEN( + "Two unlabelled qubits, two slices, lookahead unrouted, check and " + "route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[2], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[0]}); + + std::map rename_map = { + {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + std::shared_ptr mf = + std::make_shared(circ); + VertPort pre_label_0 = + mf->quantum_boundary->get().find(qubits[2])->second; + VertPort pre_label_3 = + mf->quantum_boundary->get().find(qubits[3])->second; + 
LexiLabellingMethod lrm; + REQUIRE(lrm.check_method(mf, shared_arc)); + lrm.routing_method(mf, shared_arc); + REQUIRE( + mf->quantum_boundary->get().find(qubits[2]) == + mf->quantum_boundary->get().end()); + REQUIRE( + mf->quantum_boundary->get().find(qubits[3]) == + mf->quantum_boundary->get().end()); + VertPort post_label_0 = + mf->quantum_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->quantum_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } +} SCENARIO("Test LexiRouteRoutingMethod") { std::vector nodes = { Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), @@ -577,7 +817,7 @@ SCENARIO("Test LexiRouteRoutingMethod") { REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); } } -SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { +SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { GIVEN("11 Node Architecture, 11 Qubit circuit, multiple SWAP required.") { std::vector nodes = { Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), @@ -606,7 +846,7 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { ArchitecturePtr shared_arc = std::make_shared(architecture); Circuit circ(11); std::vector qubits = circ.all_qubits(); - for (unsigned i = 0; i < 10; i++) { + for (unsigned i = 0; i < 11; i++) { circ.add_op(OpType::CX, {qubits[0], qubits[4]}); circ.add_op(OpType::CX, {qubits[6], qubits[7]}); circ.add_op(OpType::CX, {qubits[1], qubits[10]}); @@ -628,8 +868,11 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { std::shared_ptr mf = std::make_shared(copy_circ); + LexiLabellingMethod lrm; std::vector vrm = { + std::make_shared(lrm), std::make_shared(100)}; + REQUIRE(vrm[0]->check_method(mf, shared_arc)); bool res = mm.route_circuit(circ, vrm); @@ -657,7 +900,9 @@ SCENARIO("Test MappingManager::route_circuit with lc_route_subcircuit") { PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(sg, false); MappingManager mm(shared_arc); + LexiLabellingMethod lrm; std::vector vrm = { + std::make_shared(lrm), std::make_shared(100)}; bool res = mm.route_circuit(circ, vrm); diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp index e732e4d91e..1c0b9acbfd 100644 --- a/tket/tests/test_RoutingPasses.cpp +++ b/tket/tests/test_RoutingPasses.cpp @@ -18,8 +18,8 @@ #include "Characterisation/DeviceCharacterisation.hpp" #include "Circuit/Circuit.hpp" -#include "Mapping/LexiRoute.hpp" #include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" #include "Mapping/MappingManager.hpp" #include "Mapping/Verification.hpp" #include "OpType/OpType.hpp" @@ -360,20 +360,20 @@ SCENARIO( GIVEN( "A circuit that requires modification to satisfy architecture " "constraints.") { - Architecture sg({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); + Architecture arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); Circuit circ(5, 1); circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0}, 1); add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}, {1, 3}, {1, 4}, {0, 1}}); - MappingManager mm(std::make_shared(sg)); + MappingManager mm(std::make_shared(arc)); REQUIRE(mm.route_circuit( circ, {std::make_shared(), std::make_shared()})); Transforms::decompose_SWAP_to_CX().apply(circ); - REQUIRE(respects_connectivity_constraints(circ, sg, false, false)); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); Transforms::decompose_BRIDGE_to_CX().apply(circ); - 
REQUIRE(respects_connectivity_constraints(circ, sg, false, false)); + REQUIRE(respects_connectivity_constraints(circ, arc, false, false)); Command classical_com = circ.get_commands()[0]; REQUIRE(classical_com.get_args()[0] == circ.all_bits()[0]); } From ed593091938ea3d936ef10ed880f24a889d07c5d Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 18:05:14 +0000 Subject: [PATCH 106/146] add predicate back --- tket/src/Predicates/PassGenerators.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index 564692ea11..d37eb925c4 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -229,7 +229,7 @@ PassPtr gen_routing_pass( Transform t = Transform(trans); PredicatePtr twoqbpred = std::make_shared(); - // PredicatePtr placedpred = std::make_shared(arc); + PredicatePtr placedpred = std::make_shared(arc); PredicatePtr n_qubit_pred = std::make_shared(arc.n_nodes()); PredicatePtrMap precons{ From 57126ce2f6d220f9c5352b41ff2d949498bcd2c5 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 18:29:51 +0000 Subject: [PATCH 107/146] update setup.py --- pytket/setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytket/setup.py b/pytket/setup.py index 9a04452272..79baa0f882 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -126,6 +126,7 @@ def run(self): "tket-Characterisation", "tket-Converters", "tket-TokenSwapping", + "tket-TokenSwappingWithArch", "tket-Placement", "tket-Mapping", "tket-MeasurementSetup", From df61866d4de929025df92c8665fd130625c742ef Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 18:43:26 +0000 Subject: [PATCH 108/146] Add LexiLabellingMethod --- pytket/tests/placement_test.py | 12 ++++++------ pytket/tests/transform_test.py | 14 ++++++++++---- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/pytket/tests/placement_test.py b/pytket/tests/placement_test.py index 2db42081be..23149db5c9 100644 --- a/pytket/tests/placement_test.py +++ b/pytket/tests/placement_test.py @@ -24,7 +24,7 @@ place_with_map, ) from pytket.passes import PauliSimp, DefaultMappingPass # type: ignore -from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod, LexiLabellingMethod # type: ignore from pytket.qasm import circuit_from_qasm # type: ignore import json @@ -73,9 +73,9 @@ def test_placements() -> None: assert circ_qbs != graph_placed.qubits mm = MappingManager(test_architecture) - mm.route_circuit(base_placed, [LexiRouteRoutingMethod()]) - mm.route_circuit(line_placed, [LexiRouteRoutingMethod()]) - mm.route_circuit(graph_placed, [LexiRouteRoutingMethod()]) + mm.route_circuit(base_placed, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + mm.route_circuit(line_placed, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + mm.route_circuit(graph_placed, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) assert base_placed.valid_connectivity(test_architecture, False) assert line_placed.valid_connectivity(test_architecture, False) @@ -121,8 +121,8 @@ def test_placement_config() -> None: assert map1 != map2 mm = MappingManager(test_architecture) - mm.route_circuit(circ1, [LexiRouteRoutingMethod()]) - mm.route_circuit(circ2, [LexiRouteRoutingMethod()]) + mm.route_circuit(circ1, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + mm.route_circuit(circ2, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) assert circ1.n_gates < 
circ2.n_gates diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index 8421d4aea1..5f2ff503f0 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -31,7 +31,7 @@ from pytket.transform import Transform, CXConfigType, PauliSynthStrat # type: ignore from pytket.qasm import circuit_from_qasm from pytket.architecture import Architecture # type: ignore -from pytket.mapping import MappingManager, LexiRouteRoutingMethod # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod, LexiLabellingMethod # type: ignore from pytket.placement import Placement, GraphPlacement, LinePlacement, NoiseAwarePlacement # type: ignore from sympy import Symbol # type: ignore @@ -762,7 +762,9 @@ def test_decompose_swap_to_cx() -> None: pl = Placement(arc) pl.place_with_map(circ, init_map) - MappingManager(arc).route_circuit(circ, [LexiRouteRoutingMethod()]) + MappingManager(arc).route_circuit( + circ, [LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) assert circ.valid_connectivity(arc, False) Transform.DecomposeSWAPtoCX(arc).apply(circ) assert len(circ.get_commands()) == 20 @@ -805,8 +807,12 @@ def test_FullMappingPass() -> None: cu_1 = CompilationUnit(circ) gp_placer = GraphPlacement(arc) lp_placer = LinePlacement(arc) - m_pass_0 = FullMappingPass(arc, gp_placer, [LexiRouteRoutingMethod()]) - m_pass_1 = FullMappingPass(arc, lp_placer, [LexiRouteRoutingMethod()]) + m_pass_0 = FullMappingPass( + arc, gp_placer, [LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) + m_pass_1 = FullMappingPass( + arc, lp_placer, [LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) m_pass_0.apply(cu_0) m_pass_1.apply(cu_1) out_circ_0 = cu_0.circuit From b75e7f928a8b6bb973896248f85ecbe39f1f16a3 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 18:57:52 +0000 Subject: [PATCH 109/146] Cover Alec's comments --- pytket/binders/mapping.cpp | 7 +++---- schemas/compiler_pass_v1.json | 2 +- .../include/Architecture/Architecture.hpp | 4 ++++ tket/src/Mapping/LexiRoute.cpp | 2 +- tket/src/Mapping/MultiGateReorder.cpp | 2 +- tket/src/Mapping/RoutingMethodJson.cpp | 2 +- tket/src/TokenSwapping/CMakeLists.txt | 1 - .../tests/test_ArchitectureAwareSynthesis.cpp | 6 +++--- tket/tests/test_MappingVerification.cpp | 9 +++++---- tket/tests/test_RoutingPasses.cpp | 19 ++++++++++--------- 10 files changed, 29 insertions(+), 25 deletions(-) diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index 0974165a74..d19ee7db01 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -74,11 +74,10 @@ PYBIND11_MODULE(mapping, m) { py::class_( m, "MappingManager", - "Defined by a pytket Architecture object, maps Circuit logical Qubits " - "to Physically permitted Architecture qubits. Mapping is completed by " + "Defined by a pytket Architecture object, maps Circuit logical qubits " + "to physically permitted Architecture qubits. Mapping is completed by " "sequential routing (full or partial) of subcircuits. 
A custom method " - "for " - "routing (full or partial) of subcircuits can be defined in Python.") + "for routing (full or partial) of subcircuits can be defined in Python.") .def( py::init(), "MappingManager constructor.\n\n:param architecture: pytket " diff --git a/schemas/compiler_pass_v1.json b/schemas/compiler_pass_v1.json index 840871a901..3b4d728346 100644 --- a/schemas/compiler_pass_v1.json +++ b/schemas/compiler_pass_v1.json @@ -1005,7 +1005,7 @@ "type": "object", "description": "A method used during circuit mapping.", "properties": { - "name": { + "name_of_method": { "type": "string", "description": "String identifying method and whether it can be serialized." }, diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index 348ad33294..1fc6ac4263 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -103,6 +103,10 @@ class Architecture : public ArchitectureBase> { */ node_set_t get_articulation_points(const Architecture &subarc) const; + /** + * Returns true if the given operation can be executed on the Architecture + * connectivity graph. + */ bool valid_operation( const OpType &optype, const std::vector &uids) const; diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index ed3b8480cd..c3cc2e5650 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -530,7 +530,7 @@ unsigned LexiRouteRoutingMethod::get_max_depth() const { nlohmann::json LexiRouteRoutingMethod::serialize() const { nlohmann::json j; j["depth"] = this->get_max_depth(); - j["name"] = "LexiRouteRoutingMethod"; + j["name_of_method"] = "LexiRouteRoutingMethod"; return j; } diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 7065c15e75..c07ba7f603 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -279,7 +279,7 @@ nlohmann::json MultiGateReorderRoutingMethod::serialize() const { nlohmann::json j; j["depth"] = this->max_depth_; j["size"] = this->max_size_; - j["name"] = "MultiGateReorderRoutingMethod"; + j["name_of_method"] = "MultiGateReorderRoutingMethod"; return j; } diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 1f9479c89f..2c2d18db63 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -30,7 +30,7 @@ void to_json(nlohmann::json& j, const std::vector& rmp_v) { void from_json(const nlohmann::json& j, std::vector& rmp_v) { for (const auto& c : j) { - std::string name = c.at("name").get(); + std::string name = c.at("name_of_method").get(); if (name == "LexiRouteRoutingMethod") { rmp_v.push_back(std::make_shared( LexiRouteRoutingMethod::deserialize(c))); diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt index 3342fd69c8..aa0d9f275e 100644 --- a/tket/src/TokenSwapping/CMakeLists.txt +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -64,4 +64,3 @@ target_include_directories(tket-${COMP} ${TKET_${COMP}_INCLUDE_DIR} ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) -target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS}) diff --git a/tket/tests/test_ArchitectureAwareSynthesis.cpp b/tket/tests/test_ArchitectureAwareSynthesis.cpp index 6737af45c3..e6ca39ece5 100644 --- a/tket/tests/test_ArchitectureAwareSynthesis.cpp +++ b/tket/tests/test_ArchitectureAwareSynthesis.cpp @@ -17,11 +17,11 @@ 
#include "Architecture/Architecture.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" -#include "testutil.hpp" #include "Simulation/CircuitSimulator.hpp" #include "Simulation/ComparisonFunctions.hpp" +#include "testutil.hpp" -namespace tket{ +namespace tket { using Connection = Architecture::Connection; SCENARIO("Routing of aas example") { GIVEN("aas routing - simple example") { @@ -369,4 +369,4 @@ SCENARIO("Routing of aas example") { REQUIRE(test_unitary_comparison(circ, result)); } } -} //namespace tket \ No newline at end of file +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingVerification.cpp b/tket/tests/test_MappingVerification.cpp index ce9be2d3c1..915ef5c7df 100644 --- a/tket/tests/test_MappingVerification.cpp +++ b/tket/tests/test_MappingVerification.cpp @@ -14,13 +14,13 @@ #include -#include "Mapping/MappingManager.hpp" #include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" #include "Mapping/Verification.hpp" #include "Placement/Placement.hpp" #include "testutil.hpp" -namespace tket{ +namespace tket { SCENARIO( "Test validity of circuit against architecture using " "respects_connectivity_constraints method.", @@ -34,7 +34,8 @@ SCENARIO( LinePlacement lp_obj(test_arc); lp_obj.place(circ); MappingManager mm(std::make_shared(test_arc)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); CHECK(respects_connectivity_constraints(circ, test_arc, false)); } GIVEN("A failing case, undirected") { @@ -110,4 +111,4 @@ SCENARIO( REQUIRE(respects_connectivity_constraints(circ, arc, false)); } } -} // namespace tket \ No newline at end of file +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp index 5065916e42..1c2981b331 100644 --- a/tket/tests/test_RoutingPasses.cpp +++ b/tket/tests/test_RoutingPasses.cpp @@ -18,13 +18,13 @@ #include "Characterisation/DeviceCharacterisation.hpp" #include "Circuit/Circuit.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/Verification.hpp" #include "OpType/OpType.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/Predicates.hpp" -#include "Mapping/MappingManager.hpp" -#include "Mapping/LexiRoute.hpp" -#include "Mapping/Verification.hpp" #include "Simulation/CircuitSimulator.hpp" #include "Simulation/ComparisonFunctions.hpp" #include "Transformations/BasicOptimisation.hpp" @@ -192,7 +192,8 @@ SCENARIO("Test decompose_SWAP_to_CX pass", "[routing]") { GIVEN("A routed network of SWAP gates.") { SquareGrid grid(2, 5); MappingManager mm(std::make_shared(grid)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); Transforms::decompose_SWAP_to_CX().apply(circ); REQUIRE(respects_connectivity_constraints(circ, grid, false, true)); GIVEN("Directed CX gates") { @@ -277,7 +278,8 @@ SCENARIO("Test redirect_CX_gates pass", "[routing]") { } } MappingManager mm(std::make_shared(grid)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); Transforms::decompose_BRIDGE_to_CX().apply(circ); Transforms::decompose_SWAP_to_CX(arc).apply(circ); Transforms::decompose_CX_directed(grid).apply(circ); @@ -285,8 +287,6 @@ SCENARIO("Test redirect_CX_gates pass", "[routing]") { } } - - SCENARIO("Routing preserves the number of qubits") 
{ std::vector> cons; cons.push_back({Node("x", 1), Node("x", 0)}); @@ -348,8 +348,9 @@ SCENARIO( {0, 3}}); Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); MappingManager mm(std::make_shared(arc)); - REQUIRE(mm.route_circuit(circ, {std::make_shared()})); - + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); + Transform T_1 = Transforms::decompose_SWAP_to_CX(); T_1.apply(circ); REQUIRE(circ.count_gates(OpType::SWAP) == 0); From a1b5a49967f6331438f3c3016b71c9a65adf3ab7 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 19:03:27 +0000 Subject: [PATCH 110/146] format transform_test --- pytket/tests/transform_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index e4b4b6e848..9397a08daa 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -31,7 +31,7 @@ CXMappingPass, ) from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore -from pytket.passes.auto_rebase import _CX_CIRCS, NoAutoRebase +from pytket.passes.auto_rebase import _CX_CIRCS, NoAutoRebase from pytket.transform import Transform, CXConfigType, PauliSynthStrat # type: ignore from pytket.qasm import circuit_from_qasm from pytket.architecture import Architecture # type: ignore @@ -952,6 +952,8 @@ def test_CXMappingPass_terminates() -> None: p = CXMappingPass(arc, placer, directed_cx=False, delay_measures=False) res = p.apply(c) assert res + + def test_auto_rebase() -> None: pass_params = [ ({OpType.CX, OpType.Rz, OpType.Rx}, _library._CX(), _library._TK1_to_RzRx), From f24e76bb5973f13f9d5e4a0af4db5b2792c78572 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 19:10:47 +0000 Subject: [PATCH 111/146] change length of line --- pytket/pytket/architecture/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytket/pytket/architecture/__init__.py b/pytket/pytket/architecture/__init__.py index 6ff2f2b9d7..6fc66ab2b5 100644 --- a/pytket/pytket/architecture/__init__.py +++ b/pytket/pytket/architecture/__init__.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""The `architecture` module provides an API to interact with the ::py:class:`Architecture` class.""" +"""The `architecture` module provides an API to interact with the + ::py:class:`Architecture` class.""" from pytket._tket.architecture import * # type: ignore From 7eaf6fee123a892041132912425f15c17ecf345f Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 19:23:53 +0000 Subject: [PATCH 112/146] remove trailing white space --- pytket/pytket/architecture/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/pytket/architecture/__init__.py b/pytket/pytket/architecture/__init__.py index 6fc66ab2b5..bf4309e634 100644 --- a/pytket/pytket/architecture/__init__.py +++ b/pytket/pytket/architecture/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""The `architecture` module provides an API to interact with the +"""The `architecture` module provides an API to interact with the ::py:class:`Architecture` class.""" from pytket._tket.architecture import * # type: ignore From 904bd16db773f58158d83a3fffad8adc86e84b7e Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 19:33:43 +0000 Subject: [PATCH 113/146] name -> name_of_method --- tket/tests/test_MultiGateReorder.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index a51d569794..1bf13b6eb8 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -383,7 +383,7 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { SCENARIO("Test JSON serialisation") { GIVEN("MultiGateReorderRoutingMethod") { nlohmann::json j_rm; - j_rm["name"] = "MultiGateReorderRoutingMethod"; + j_rm["name_of_method"] = "MultiGateReorderRoutingMethod"; j_rm["depth"] = 3; j_rm["size"] = 4; MultiGateReorderRoutingMethod rm_loaded = @@ -394,9 +394,11 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { GIVEN("RoutingMethod vector") { nlohmann::json j_rms = { - {{"name", "MultiGateReorderRoutingMethod"}, {"depth", 3}, {"size", 4}}, + {{"name_of_method", "MultiGateReorderRoutingMethod"}, + {"depth", 3}, + {"size", 4}}, { - {"name", "LexiRouteRoutingMethod"}, + {"name_of_method", "LexiRouteRoutingMethod"}, {"depth", 3}, }}; std::vector rms = From 8fe485fc8b138219a47eaeb0228aa2d20ca360e7 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 19:39:08 +0000 Subject: [PATCH 114/146] name -> name_of_method RoutingMethod --- tket/src/Mapping/include/Mapping/RoutingMethod.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index 23041e2105..cb6c77e871 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -63,7 +63,7 @@ class RoutingMethod { virtual nlohmann::json serialize() const { nlohmann::json j; - j["name"] = "RoutingMethod"; + j["name_of_method"] = "RoutingMethod"; return j; } }; From ecd9b51411511ca2b195b58698853b7822a9249e Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Wed, 16 Feb 2022 19:47:10 +0000 Subject: [PATCH 115/146] tokenswappingwitharch --- pytket/setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytket/setup.py b/pytket/setup.py index 9a04452272..79baa0f882 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -126,6 +126,7 @@ def run(self): "tket-Characterisation", "tket-Converters", "tket-TokenSwapping", + "tket-TokenSwappingWithArch", "tket-Placement", "tket-Mapping", "tket-MeasurementSetup", From f9394eb932b8e625224974680e19891bc82fbc25 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Thu, 17 Feb 2022 08:55:00 +0000 Subject: [PATCH 116/146] add type ignores --- pytket/tests/architecture_test.py | 4 ++-- pytket/tests/mapping_test.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pytket/tests/architecture_test.py b/pytket/tests/architecture_test.py index 765fb6a9c0..7d476f6426 100644 --- a/pytket/tests/architecture_test.py +++ b/pytket/tests/architecture_test.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pytket.circuit import Node -from pytket.architecture import Architecture, SquareGrid, FullyConnected +from pytket.circuit import Node # type: ignore +from pytket.architecture import Architecture, SquareGrid, FullyConnected # type: ignore def test_architectures() -> None: diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index cc2cfcc82c..e9079e4509 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -16,7 +16,7 @@ from pytket.architecture import Architecture # type: ignore from pytket import Circuit, OpType from pytket.circuit import Node, Qubit # type: ignore -from pytket.placement import Placement +from pytket.placement import Placement # type: ignore from typing import Tuple, Dict From f9394eb932b8e625224974680e19891bc82fbc25 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 18 Feb 2022 10:42:23 +0000 Subject: [PATCH 117/146] "name_of_method" -> "name' --- schemas/compiler_pass_v1.json | 10 ++++------ tket/src/Mapping/LexiRoute.cpp | 2 +- tket/src/Mapping/MultiGateReorder.cpp | 2 +- tket/src/Mapping/RoutingMethodJson.cpp | 2 +- tket/src/Mapping/include/Mapping/RoutingMethod.hpp | 2 +- tket/tests/test_MultiGateReorder.cpp | 8 +++----- 6 files changed, 11 insertions(+), 15 deletions(-) diff --git a/schemas/compiler_pass_v1.json b/schemas/compiler_pass_v1.json index a011ae2a70..fa74d46b26 100644 --- a/schemas/compiler_pass_v1.json +++ b/schemas/compiler_pass_v1.json @@ -125,7 +125,6 @@ "$ref": "file:///circuit_v1.json#", "description": "A circuit implementing a CX gate in a target gate set. Used in \"RebaseCustom\"." }, - "basis_tk1_replacement": { "type": "string", "description": "A method for generating optimised single-qubit unitary circuits in a target gate set. This string should be interpreted by Python \"dill\" into a function. Used in \"RebaseCustom\" and \"SquashCustom\"." @@ -993,18 +992,17 @@ "op_link_errors" ] }, - - "routingmethod" :{ + "routingmethod": { "type": "object", "description": "A method used during circuit mapping.", "properties": { - "name_of_method": { + "name": { "type": "string", - "description": "String identifying method and whether it can be serialized." + "description": "Name of method." 
}, }, }, - "routing_config":{ + "routing_config": { "type": "array", "description": "A configuration for routing defined by an array of RoutingMethod.", "items": { diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index c3cc2e5650..ed3b8480cd 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -530,7 +530,7 @@ unsigned LexiRouteRoutingMethod::get_max_depth() const { nlohmann::json LexiRouteRoutingMethod::serialize() const { nlohmann::json j; j["depth"] = this->get_max_depth(); - j["name_of_method"] = "LexiRouteRoutingMethod"; + j["name"] = "LexiRouteRoutingMethod"; return j; } diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index c07ba7f603..7065c15e75 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -279,7 +279,7 @@ nlohmann::json MultiGateReorderRoutingMethod::serialize() const { nlohmann::json j; j["depth"] = this->max_depth_; j["size"] = this->max_size_; - j["name_of_method"] = "MultiGateReorderRoutingMethod"; + j["name"] = "MultiGateReorderRoutingMethod"; return j; } diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 2c2d18db63..1f9479c89f 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -30,7 +30,7 @@ void to_json(nlohmann::json& j, const std::vector& rmp_v) { void from_json(const nlohmann::json& j, std::vector& rmp_v) { for (const auto& c : j) { - std::string name = c.at("name_of_method").get(); + std::string name = c.at("name").get(); if (name == "LexiRouteRoutingMethod") { rmp_v.push_back(std::make_shared( LexiRouteRoutingMethod::deserialize(c))); diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index cb6c77e871..23041e2105 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -63,7 +63,7 @@ class RoutingMethod { virtual nlohmann::json serialize() const { nlohmann::json j; - j["name_of_method"] = "RoutingMethod"; + j["name"] = "RoutingMethod"; return j; } }; diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 1bf13b6eb8..a51d569794 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -383,7 +383,7 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { SCENARIO("Test JSON serialisation") { GIVEN("MultiGateReorderRoutingMethod") { nlohmann::json j_rm; - j_rm["name_of_method"] = "MultiGateReorderRoutingMethod"; + j_rm["name"] = "MultiGateReorderRoutingMethod"; j_rm["depth"] = 3; j_rm["size"] = 4; MultiGateReorderRoutingMethod rm_loaded = @@ -394,11 +394,9 @@ SCENARIO("Test JSON serialisation") { GIVEN("RoutingMethod vector") { nlohmann::json j_rms = { - {{"name_of_method", "MultiGateReorderRoutingMethod"}, - {"depth", 3}, - {"size", 4}}, + {{"name", "MultiGateReorderRoutingMethod"}, {"depth", 3}, {"size", 4}}, { - {"name_of_method", "LexiRouteRoutingMethod"}, + {"name", "LexiRouteRoutingMethod"}, {"depth", 3}, }}; std::vector rms = From 4f2ded6d7c1836ee78e5a004a897946a1068764d Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 18 Feb 2022 10:43:58 +0000 Subject: [PATCH 118/146] Update valid_operation description --- tket/src/Architecture/include/Architecture/Architecture.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp 
b/tket/src/Architecture/include/Architecture/Architecture.hpp index 1fc6ac4263..7765014a41 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -104,8 +104,8 @@ class Architecture : public ArchitectureBase> { node_set_t get_articulation_points(const Architecture &subarc) const; /** - * Returns true if the given operation can be executed on the Architecture - * connectivity graph. + * Returns true if the given operation acting on the given nodes + * can be executed on the Architecture connectivity graph. */ bool valid_operation( const OpType &optype, const std::vector &uids) const; From a53b7ee6541b1d062c87570f9b4bf14c064bb1d3 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 18 Feb 2022 10:55:57 +0000 Subject: [PATCH 119/146] reformat transform_test and remove "had" --- pytket/tests/transform_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index 9152fa59dd..41eaf04233 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -1,4 +1,4 @@ - had # Copyright 2019-2022 Cambridge Quantum Computing +# Copyright 2019-2022 Cambridge Quantum Computing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,7 +30,7 @@ RoutingPass, PlacementPass, CXMappingPass, - auto_rebase_pass, + auto_rebase_pass, auto_squash_pass, ) from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore From 499911547933267bcabe3ba417601767597ec5ea Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Fri, 18 Feb 2022 10:57:31 +0000 Subject: [PATCH 120/146] remove second auto_rebase_pass --- pytket/tests/transform_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index 41eaf04233..b2564e0ec7 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -22,7 +22,6 @@ KAKDecomposition, CommuteThroughMultis, RebaseCustom, - auto_rebase_pass, PauliSquash, FullPeepholeOptimise, DefaultMappingPass, From ac29545a1f8712e2006742cf2fca1fad9975dc8b Mon Sep 17 00:00:00 2001 From: Alec Edgington Date: Fri, 18 Feb 2022 14:59:13 +0000 Subject: [PATCH 121/146] Add missing import. --- pytket/tests/transform_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index b2564e0ec7..61368b3967 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -20,6 +20,7 @@ from pytket.passes import ( # type: ignore RemoveRedundancies, KAKDecomposition, + SquashCustom, CommuteThroughMultis, RebaseCustom, PauliSquash, From e3ae6bc358996d867a16301765238e7f74a42313 Mon Sep 17 00:00:00 2001 From: Alec Edgington Date: Fri, 18 Feb 2022 15:50:00 +0000 Subject: [PATCH 122/146] Add option to tket-tests conan build determining whether full tests are run. If tket-tests:full=True, the compiler flag TKET_TESTS_FULL is defined. 
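
A minimal sketch of how a test might be gated on this flag (assuming the TKET_TESTS_FULL macro is simply checked with the preprocessor inside a Catch2 test file; the test name and body below are illustrative only, not code from this patch):

    #include <catch2/catch.hpp>

    #ifdef TKET_TESTS_FULL
    // Compiled only when tket-tests is built with full=True, i.e. when CMake
    // adds the TKET_TESTS_FULL compile definition.
    TEST_CASE("exhaustive checks reserved for the full test suite") {
      // Long-running assertions would go here.
      REQUIRE(true);
    }
    #endif
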
--- recipes/tket-tests/conanfile.py | 16 ++++++++++++---- tket/tests/CMakeLists.txt | 5 +++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/recipes/tket-tests/conanfile.py b/recipes/tket-tests/conanfile.py index c719a3ff38..afd7ab6371 100644 --- a/recipes/tket-tests/conanfile.py +++ b/recipes/tket-tests/conanfile.py @@ -26,25 +26,33 @@ class TketTestsConan(ConanFile): description = "Unit tests for tket" topics = ("quantum", "computation", "compiler") settings = "os", "compiler", "build_type", "arch" - options = {"with_coverage": [True, False]} - default_options = {"with_coverage": False} + options = {"with_coverage": [True, False], "full": [True, False]} + default_options = {"with_coverage": False, "full": False} generators = "cmake" exports_sources = "../../tket/tests/*" requires = ("tket/1.0.1", "catch2/2.13.8") + _cmake = None + def validate(self): if self.options.with_coverage and self.settings.compiler != "gcc": raise ConanInvalidConfiguration( "`with_coverage` option only available with gcc" ) + def _configure_cmake(self): + if self._cmake is None: + self._cmake = CMake(self) + self._cmake.definitions["TESTS_FULL"] = self.options.full + self._cmake.configure() + return self._cmake + def configure(self): if self.options.with_coverage: self.options["tket"].profile_coverage = True def build(self): - cmake = CMake(self) - cmake.configure() + cmake = self._configure_cmake() cmake.build() def package(self): diff --git a/tket/tests/CMakeLists.txt b/tket/tests/CMakeLists.txt index eaad06f9a2..10d4a8359a 100644 --- a/tket/tests/CMakeLists.txt +++ b/tket/tests/CMakeLists.txt @@ -36,6 +36,11 @@ ENDIF() add_definitions(-DALL_LOGS) +set(TESTS_FULL no CACHE BOOL "Run full set of tests") + +if (TESTS_FULL) + add_definitions(-DTKET_TESTS_FULL) +endif() set(TKET_TESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}) From 5245c1fb6240b5c46acd4e4aad15e31b15737849 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 15:50:12 +0000 Subject: [PATCH 123/146] remove TokenSwappingWithArch project; move files into Architecture --- recipes/tket/conanfile.py | 1 - .../ArchitectureMapping.cpp | 26 +++++----- .../BestTsaWithArch.cpp | 0 tket/src/Architecture/CMakeLists.txt | 7 ++- .../DistancesFromArchitecture.cpp | 5 +- .../NeighboursFromArchitecture.cpp | 12 ++--- .../Architecture}/ArchitectureMapping.hpp | 3 +- .../include/Architecture}/BestTsaWithArch.hpp | 4 +- .../DistancesFromArchitecture.hpp | 0 .../NeighboursFromArchitecture.hpp | 0 tket/src/CMakeLists.txt | 1 - tket/src/TokenSwappingWithArch/CMakeLists.txt | 48 ------------------- .../TokenSwapping/TestUtils/BestTsaTester.cpp | 2 +- .../TestUtils/FullTsaTesting.cpp | 6 +-- .../TestUtils/FullTsaTesting.hpp | 2 +- .../TestUtils/PartialTsaTesting.cpp | 4 +- .../TestUtils/PartialTsaTesting.hpp | 2 +- .../test_ArchitectureMappingEndToEnd.cpp | 6 +-- .../test_DistancesFromArchitecture.cpp | 2 +- .../test_RiverFlowPathFinder.cpp | 6 +-- .../test_SwapsFromQubitMapping.cpp | 2 +- 21 files changed, 49 insertions(+), 90 deletions(-) rename tket/src/{TokenSwappingWithArch => Architecture}/ArchitectureMapping.cpp (81%) rename tket/src/{TokenSwappingWithArch => Architecture}/BestTsaWithArch.cpp (100%) rename tket/src/{TokenSwappingWithArch => Architecture}/DistancesFromArchitecture.cpp (95%) rename tket/src/{TokenSwappingWithArch => Architecture}/NeighboursFromArchitecture.cpp (85%) rename tket/src/{TokenSwappingWithArch/include/TokenSwappingWithArch => Architecture/include/Architecture}/ArchitectureMapping.hpp (97%) rename 
tket/src/{TokenSwappingWithArch/include/TokenSwappingWithArch => Architecture/include/Architecture}/BestTsaWithArch.hpp (95%) rename tket/src/{TokenSwappingWithArch/include/TokenSwappingWithArch => Architecture/include/Architecture}/DistancesFromArchitecture.hpp (100%) rename tket/src/{TokenSwappingWithArch/include/TokenSwappingWithArch => Architecture/include/Architecture}/NeighboursFromArchitecture.hpp (100%) delete mode 100644 tket/src/TokenSwappingWithArch/CMakeLists.txt diff --git a/recipes/tket/conanfile.py b/recipes/tket/conanfile.py index e50e857bce..00875919a1 100644 --- a/recipes/tket/conanfile.py +++ b/recipes/tket/conanfile.py @@ -65,7 +65,6 @@ class TketConan(ConanFile): "Characterisation", "Converters", "TokenSwapping", - "TokenSwappingWithArch", "Mapping", "Placement", "MeasurementSetup", diff --git a/tket/src/TokenSwappingWithArch/ArchitectureMapping.cpp b/tket/src/Architecture/ArchitectureMapping.cpp similarity index 81% rename from tket/src/TokenSwappingWithArch/ArchitectureMapping.cpp rename to tket/src/Architecture/ArchitectureMapping.cpp index a0aa287022..e9970f974f 100644 --- a/tket/src/TokenSwappingWithArch/ArchitectureMapping.cpp +++ b/tket/src/Architecture/ArchitectureMapping.cpp @@ -36,8 +36,9 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( citer == m_node_to_vertex_mapping.cend(), - "Duplicate node " << node.repr() << " at vertices " << citer->second - << ", " << ii); + std::stringstream() + << "Duplicate node " << node.repr() << " at vertices " + << citer->second << ", " << ii); // GCOVR_EXCL_STOP } m_node_to_vertex_mapping[node] = ii; @@ -72,10 +73,10 @@ ArchitectureMapping::ArchitectureMapping( // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( uids.size() == m_vertex_to_node_mapping.size(), - "passed in " << edges.size() << " edges, giving " - << m_vertex_to_node_mapping.size() - << " vertices; but the architecture object has " - << uids.size() << " vertices"); + std::stringstream() << "passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has " + << uids.size() << " vertices"); // GCOVR_EXCL_STOP for (const UnitID& uid : uids) { @@ -83,8 +84,8 @@ ArchitectureMapping::ArchitectureMapping( // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( m_node_to_vertex_mapping.count(node) != 0, - "passed in " - << edges.size() << " edges, giving " + std::stringstream() + << "passed in " << edges.size() << " edges, giving " << m_vertex_to_node_mapping.size() << " vertices; but the architecture object has an unknown node " << node.repr()); @@ -100,9 +101,10 @@ const Node& ArchitectureMapping::get_node(size_t vertex) const { const auto num_vertices = number_of_vertices(); // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( - vertex < num_vertices, "invalid vertex " << vertex - << " (architecture only has " - << num_vertices << " vertices)"); + vertex < num_vertices, std::stringstream() + << "invalid vertex " << vertex + << " (architecture only has " << num_vertices + << " vertices)"); // GCOVR_EXCL_STOP return m_vertex_to_node_mapping[vertex]; @@ -113,7 +115,7 @@ size_t ArchitectureMapping::get_vertex(const Node& node) const { // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( citer != m_node_to_vertex_mapping.cend(), - "node " << node.repr() << " has no vertex number"); + std::stringstream() << "node " << node.repr() << " has no vertex number"); // GCOVR_EXCL_STOP return citer->second; } diff --git a/tket/src/TokenSwappingWithArch/BestTsaWithArch.cpp 
b/tket/src/Architecture/BestTsaWithArch.cpp similarity index 100% rename from tket/src/TokenSwappingWithArch/BestTsaWithArch.cpp rename to tket/src/Architecture/BestTsaWithArch.cpp diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index ead73a0507..8bf9bcffa1 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -20,11 +20,16 @@ endif() add_library(tket-${COMP} Architecture.cpp - ArchitectureGraphClasses.cpp) + ArchitectureGraphClasses.cpp + ArchitectureMapping.cpp + BestTsaWithArch.cpp + DistancesFromArchitecture.cpp + NeighboursFromArchitecture.cpp) list(APPEND DEPS_${COMP} Graphs OpType + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/TokenSwappingWithArch/DistancesFromArchitecture.cpp b/tket/src/Architecture/DistancesFromArchitecture.cpp similarity index 95% rename from tket/src/TokenSwappingWithArch/DistancesFromArchitecture.cpp rename to tket/src/Architecture/DistancesFromArchitecture.cpp index 4c277fb118..4900fffe2b 100644 --- a/tket/src/TokenSwappingWithArch/DistancesFromArchitecture.cpp +++ b/tket/src/Architecture/DistancesFromArchitecture.cpp @@ -76,8 +76,9 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( distance_entry > 0, - "DistancesFromArchitecture: architecture has " - << arch.n_nodes() << " vertices, " << arch.n_connections() + std::stringstream() + << "DistancesFromArchitecture: architecture has " << arch.n_nodes() + << " vertices, " << arch.n_connections() << " edges; returned diameter " << arch.get_diameter() << " and d(" << vertex1 << "," << vertex2 << ")=0. " diff --git a/tket/src/TokenSwappingWithArch/NeighboursFromArchitecture.cpp b/tket/src/Architecture/NeighboursFromArchitecture.cpp similarity index 85% rename from tket/src/TokenSwappingWithArch/NeighboursFromArchitecture.cpp rename to tket/src/Architecture/NeighboursFromArchitecture.cpp index bab81a70d2..36573813c5 100644 --- a/tket/src/TokenSwappingWithArch/NeighboursFromArchitecture.cpp +++ b/tket/src/Architecture/NeighboursFromArchitecture.cpp @@ -29,9 +29,9 @@ const std::vector& NeighboursFromArchitecture::operator()( const auto num_vertices = m_arch_mapping.number_of_vertices(); // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( - vertex < num_vertices, "get_neighbours: invalid vertex " - << vertex << " (only have " << num_vertices - << " vertices)"); + vertex < num_vertices, + std::stringstream() << "get_neighbours: invalid vertex " << vertex + << " (only have " << num_vertices << " vertices)"); // GCOVR_EXCL_STOP auto& neighbours = m_cached_neighbours[vertex]; if (!neighbours.empty()) { @@ -53,9 +53,9 @@ const std::vector& NeighboursFromArchitecture::operator()( // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( neighbour_vertex != vertex, - "get_neighbours: vertex " - << vertex << " for node " << node.repr() << " has " - << neighbour_nodes.size() + std::stringstream() + << "get_neighbours: vertex " << vertex << " for node " + << node.repr() << " has " << neighbour_nodes.size() << " neighbours, and lists itself as a neighbour (loops not " "allowed)"); // GCOVR_EXCL_STOP diff --git a/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/ArchitectureMapping.hpp b/tket/src/Architecture/include/Architecture/ArchitectureMapping.hpp similarity index 97% rename from tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/ArchitectureMapping.hpp rename to tket/src/Architecture/include/Architecture/ArchitectureMapping.hpp index 
eecfc5bd92..63bcffdf1b 100644 --- a/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/ArchitectureMapping.hpp +++ b/tket/src/Architecture/include/Architecture/ArchitectureMapping.hpp @@ -19,7 +19,8 @@ namespace tket { -/** For mapping between nodes in an architecture and size_t vertex numbers. +/** Intended for use with TokenSwapping. + * For mapping between nodes in an architecture and size_t vertex numbers. * The vertex numbers are merely the indices of each Node * within the vector returned by the get_all_nodes() function. * diff --git a/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/BestTsaWithArch.hpp b/tket/src/Architecture/include/Architecture/BestTsaWithArch.hpp similarity index 95% rename from tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/BestTsaWithArch.hpp rename to tket/src/Architecture/include/Architecture/BestTsaWithArch.hpp index 3df8c50ac7..262242b4d6 100644 --- a/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/BestTsaWithArch.hpp +++ b/tket/src/Architecture/include/Architecture/BestTsaWithArch.hpp @@ -45,13 +45,13 @@ struct BestTsaWithArch { /** Given an architecture and desired source->target node mapping, * compute a sequence of swaps (attempts to be as short as possible) * which will perform that mapping. - * Note that it may use ALL th enodes in the architecture, + * Note that it may use ALL the nodes in the architecture, * not just the ones occurring in the node_mapping. * If you wish certain nodes to be fixed, specify them in the mapping * (with equal source and target). * (However, note that they might STILL be moved, as long as by the end * they are back at the start. If you really don't to involve a particular - * node, you mustremove it completely from the architecture). + * node, you must remove it completely from the architecture). * KNOWN BUG: it may give an error with disconnected architectures. * @param architecture The raw object containing the graph. * @param node_mapping The desired source->target node mapping. diff --git a/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/DistancesFromArchitecture.hpp b/tket/src/Architecture/include/Architecture/DistancesFromArchitecture.hpp similarity index 100% rename from tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/DistancesFromArchitecture.hpp rename to tket/src/Architecture/include/Architecture/DistancesFromArchitecture.hpp diff --git a/tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/NeighboursFromArchitecture.hpp b/tket/src/Architecture/include/Architecture/NeighboursFromArchitecture.hpp similarity index 100% rename from tket/src/TokenSwappingWithArch/include/TokenSwappingWithArch/NeighboursFromArchitecture.hpp rename to tket/src/Architecture/include/Architecture/NeighboursFromArchitecture.hpp diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index f6d5b4b7cb..7dbc490a37 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -71,7 +71,6 @@ list(APPEND TKET_COMPS PauliGraph Circuit Architecture - TokenSwappingWithArch Simulation Diagonalisation Program diff --git a/tket/src/TokenSwappingWithArch/CMakeLists.txt b/tket/src/TokenSwappingWithArch/CMakeLists.txt deleted file mode 100644 index b975711762..0000000000 --- a/tket/src/TokenSwappingWithArch/CMakeLists.txt +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2019-2022 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -project(tket-${COMP}) - -if (NOT ${COMP} STREQUAL "TokenSwappingWithArch") - message(FATAL_ERROR "Unexpected component name.") -endif() - -add_library(tket-${COMP} - ArchitectureMapping.cpp - BestTsaWithArch.cpp - DistancesFromArchitecture.cpp - NeighboursFromArchitecture.cpp - ) - -list(APPEND DEPS_${COMP} - Architecture - Graphs - OpType - TokenSwapping - Utils) - -foreach(DEP ${DEPS_${COMP}}) - target_include_directories( - tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) - target_link_libraries( - tket-${COMP} PRIVATE tket-${DEP}) -endforeach() - -target_include_directories(tket-${COMP} - PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR} - ${TKET_${COMP}_INCLUDE_DIR} - ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) - -target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS}) diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp index 89d8283325..1137df8b7d 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -18,7 +18,7 @@ #include "TokenSwapping/VertexMappingFunctions.hpp" #include "TokenSwapping/VertexSwapResult.hpp" -#include "TokenSwappingWithArch/BestTsaWithArch.hpp" +#include "Architecture/BestTsaWithArch.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp index 9dc6aeb160..9f0df5f882 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -20,9 +20,9 @@ #include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/VertexSwapResult.hpp" -#include "TokenSwappingWithArch/ArchitectureMapping.hpp" -#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" -#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" +#include "Architecture/ArchitectureMapping.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index dc115df395..709facaa98 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -16,7 +16,7 @@ #include "TokenSwapping/PartialTsaInterface.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" -#include "TokenSwappingWithArch/ArchitectureMapping.hpp" +#include "Architecture/ArchitectureMapping.hpp" #include "Utils/RNG.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp index 0637d577fe..ac90f678c2 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -20,8 +20,8 @@ #include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/VertexSwapResult.hpp" -#include 
"TokenSwappingWithArch/DistancesFromArchitecture.hpp" -#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index a3de1aa99a..c36f9a7f54 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -15,7 +15,7 @@ #pragma once #include "TokenSwapping/PartialTsaInterface.hpp" -#include "TokenSwappingWithArch/ArchitectureMapping.hpp" +#include "Architecture/ArchitectureMapping.hpp" #include "Utils/RNG.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp index 3d46f7c455..7c83c83152 100644 --- a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp +++ b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -15,9 +15,9 @@ #include #include -#include "TokenSwappingWithArch/ArchitectureMapping.hpp" -#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" -#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" +#include "Architecture/ArchitectureMapping.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp index 692ad6e296..ac62c2a1cb 100644 --- a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp +++ b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp @@ -16,7 +16,7 @@ #include #include -#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" using Catch::Matchers::Contains; using std::vector; diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp index 4f932da85e..5978b885a0 100644 --- a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -15,11 +15,11 @@ #include #include +#include "Architecture/ArchitectureMapping.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" #include "TestUtils/ArchitectureEdgesReimplementation.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" -#include "TokenSwappingWithArch/ArchitectureMapping.hpp" -#include "TokenSwappingWithArch/DistancesFromArchitecture.hpp" -#include "TokenSwappingWithArch/NeighboursFromArchitecture.hpp" #include "Utils/RNG.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp index cc990dba55..80a7d09b19 100644 --- a/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp +++ b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp @@ -15,7 +15,7 @@ #include #include -#include "TokenSwappingWithArch/BestTsaWithArch.hpp" +#include "Architecture/BestTsaWithArch.hpp" #include "Utils/RNG.hpp" using std::vector; From 3435365f76da82c7fcf1ddedd937c4f0dc32cf02 Mon Sep 17 00:00:00 2001 From: Alec Edgington Date: Fri, 18 Feb 2022 15:56:26 +0000 Subject: [PATCH 124/146] Set "full tests" option on scheduled CI runs only. 
--- .github/workflows/build_and_test.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 9a4ba30c0a..b416313b07 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -55,6 +55,9 @@ jobs: ${conan_cmd} profile update settings.compiler.libcxx=libstdc++11 tket ${conan_cmd} profile update options.tket:shared=True tket echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: ${CONAN_CMD} profile update options.tket-tests:full=True tket - name: add remote run: ${CONAN_CMD} remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan - name: Install ninja and ccache @@ -177,6 +180,9 @@ jobs: conan profile update options.tket:shared=True tket export CC=`which conan` echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket - name: add remote run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force - name: Install boost @@ -268,6 +274,9 @@ jobs: conan profile update options.tket:shared=True tket export CC=`which conan` echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket - name: add remote run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force - name: Install boost @@ -366,6 +375,9 @@ jobs: conan profile update options.tket:shared=True tket $conan_cmd = (gcm conan).Path echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket - name: add remote run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan - name: Cache tket build From 237e23ee7754fb7a1364ae52322513518c6900e4 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 15:57:18 +0000 Subject: [PATCH 125/146] change TKET_ASSERT_WITH_MESSAGE to have parentheses around message --- tket/src/Gate/GateUnitarySparseMatrix.cpp | 11 ++- tket/src/Graphs/AdjacencyData.cpp | 20 ++--- tket/src/Graphs/BruteForceColouring.cpp | 14 ++-- tket/src/Graphs/ColouringPriority.cpp | 18 ++--- tket/src/Graphs/GraphColouring.cpp | 10 +-- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 18 ++--- .../TSAUtils/VertexMappingFunctions.cpp | 2 +- .../VectorListHybridSkeleton.cpp | 9 ++- tket/src/Utils/CMakeLists.txt | 1 + tket/src/Utils/GetTketAssertMessage.cpp | 19 +++++ tket/src/Utils/include/Utils/Assert.hpp | 76 ++++++++++--------- 11 files changed, 113 insertions(+), 85 deletions(-) create mode 100644 tket/src/Utils/GetTketAssertMessage.cpp diff --git a/tket/src/Gate/GateUnitarySparseMatrix.cpp b/tket/src/Gate/GateUnitarySparseMatrix.cpp index aac7fba756..8146384ff5 100644 --- a/tket/src/Gate/GateUnitarySparseMatrix.cpp +++ b/tket/src/Gate/GateUnitarySparseMatrix.cpp @@ -154,12 +154,11 @@ std::vector GateUnitarySparseMatrix::get_unitary_triplets( gate, primitive_type, abs_epsilon); } catch (const GateUnitaryMatrixError& e) { // GCOVR_EXCL_START - std::stringstream ss; - OpDesc desc(primitive_type); - ss << "Converting " << gate.get_name() - << " to sparse unitary, via adding controls to gate type " - << desc.name() << ": " << e.what(); - 
TKET_ASSERT_WITH_MESSAGE(false, ss.str()); + TKET_ASSERT_WITH_MESSAGE( + false, std::stringstream() + << "Converting " << gate.get_name() + << " to sparse unitary, via adding controls to gate type " + << OpDesc(primitive_type).name() << ": " << e.what()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index 85ab1110c4..25b1363e2a 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -68,7 +68,8 @@ const set& AdjacencyData::get_neighbours( // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( vertex < m_cleaned_data.size(), - "AdjacencyData: get_neighbours called with invalid vertex " + std::stringstream() + << "AdjacencyData: get_neighbours called with invalid vertex " << vertex << "; there are only " << m_cleaned_data.size() << " vertices"); // GCOVR_EXCL_STOP @@ -105,9 +106,9 @@ bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( (i < m_cleaned_data.size() && j < m_cleaned_data.size()), - "edge_exists called with vertices " - << i << ", " << j << ", but there are only " << m_cleaned_data.size() - << " vertices"); + std::stringstream() << "edge_exists called with vertices " << i << ", " + << j << ", but there are only " + << m_cleaned_data.size() << " vertices"); // GCOVR_EXCL_STOP return m_cleaned_data[i].count(j) != 0; } @@ -148,13 +149,14 @@ AdjacencyData::AdjacencyData( for (std::size_t j : raw_data[i]) { // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( - i != j || allow_loops, "Vertex " << i << " out of " - << m_cleaned_data.size() - << " has a loop."); + i != j || allow_loops, + std::stringstream() << "Vertex " << i << " out of " + << m_cleaned_data.size() << " has a loop."); TKET_ASSERT_WITH_MESSAGE( j < m_cleaned_data.size(), - "Vertex " << i << " has illegal neighbour vertex " << j - << ", the size is " << m_cleaned_data.size()); + std::stringstream() + << "Vertex " << i << " has illegal neighbour vertex " << j + << ", the size is " << m_cleaned_data.size()); // GCOVR_EXCL_STOP m_cleaned_data[i].insert(j); m_cleaned_data[j].insert(i); diff --git a/tket/src/Graphs/BruteForceColouring.cpp b/tket/src/Graphs/BruteForceColouring.cpp index 7b2d9744fd..29701b3135 100644 --- a/tket/src/Graphs/BruteForceColouring.cpp +++ b/tket/src/Graphs/BruteForceColouring.cpp @@ -216,13 +216,13 @@ BruteForceColouring::BruteForceColouring( throw std::runtime_error("suggested_number_of_colours hit number_of_nodes"); } catch (const std::exception& e) { // GCOVR_EXCL_START - std::stringstream ss; - ss << "initial_suggested_number_of_colours = " - << initial_suggested_number_of_colours - << ", reached suggested_number_of_colours = " - << suggested_number_of_colours << ", had " << number_of_nodes - << " nodes. Error: " << e.what() << priority.print_raw_data(); - TKET_ASSERT_WITH_MESSAGE(false, ss.str()); + TKET_ASSERT_WITH_MESSAGE( + false, std::stringstream() << "initial_suggested_number_of_colours = " + << initial_suggested_number_of_colours + << ", reached suggested_number_of_colours = " + << suggested_number_of_colours << ", had " + << number_of_nodes << " nodes. 
Error: " + << e.what() << priority.print_raw_data()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/ColouringPriority.cpp b/tket/src/Graphs/ColouringPriority.cpp index ede03a1ff3..c60515c979 100644 --- a/tket/src/Graphs/ColouringPriority.cpp +++ b/tket/src/Graphs/ColouringPriority.cpp @@ -88,15 +88,15 @@ static void fill_initial_node_sequence( // GCOVR_EXCL_STOP } catch (const std::exception& e) { // GCOVR_EXCL_START - std::stringstream ss; - ss << "ColouringPriority: fill_initial_node_sequence: initial" - << " clique size " << initial_clique.size() << ", " - << vertices_in_component.size() << " vertices in" - << " this component (full graph has " - << adjacency_data.get_number_of_vertices() << " vertices)." - << " So far, filled " << nodes.size() << " nodes." - << " Error: " << e.what(); - TKET_ASSERT_WITH_MESSAGE(false, ss.str()); + TKET_ASSERT_WITH_MESSAGE( + false, std::stringstream() + << "ColouringPriority: fill_initial_node_sequence: initial" + << " clique size " << initial_clique.size() << ", " + << vertices_in_component.size() << " vertices in" + << " this component (full graph has " + << adjacency_data.get_number_of_vertices() << " vertices)." + << " So far, filled " << nodes.size() << " nodes." + << " Error: " << e.what()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/GraphColouring.cpp b/tket/src/Graphs/GraphColouring.cpp index 0c551a6b67..a50707c1e0 100644 --- a/tket/src/Graphs/GraphColouring.cpp +++ b/tket/src/Graphs/GraphColouring.cpp @@ -94,11 +94,11 @@ static void colour_single_component( } colour_to_assign = colour; } catch (const exception& e) { - stringstream ss; - ss << "colouring single component " << component_index - << " returned vertex " << vertex << " with colour " << colour << " : " - << e.what(); - TKET_ASSERT_WITH_MESSAGE(false, ss.str()); + TKET_ASSERT_WITH_MESSAGE( + false, stringstream() + << "colouring single component " << component_index + << " returned vertex " << vertex << " with colour " + << colour << " : " << e.what()); } // GCOVR_EXCL_STOP } diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index 03d1e47368..9ff42bd679 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -118,19 +118,19 @@ void RiverFlowPathFinder::Impl::grow_path( TKET_ASSERT_WITH_MESSAGE( neighbour_distance_to_target == remaining_distance || neighbour_distance_to_target == remaining_distance + 1, - "d(v_" << path.back() << ", v_" << target_vertex - << ")=" << remaining_distance << ". But v_" << path.back() - << " has neighbour v_" << neighbour << ", at distance " - << neighbour_distance_to_target << " to the target v_" - << target_vertex); + std::stringstream() << "d(v_" << path.back() << ", v_" << target_vertex + << ")=" << remaining_distance << ". 
But v_" + << path.back() << " has neighbour v_" << neighbour + << ", at distance " << neighbour_distance_to_target + << " to the target v_" << target_vertex); // GCOVR_EXCL_STOP } // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( - !candidate_moves.empty(), "No neighbours of v_" - << path.back() << " at correct distance " - << remaining_distance - 1 - << " to target vertex v_" << target_vertex); + !candidate_moves.empty(), + std::stringstream() << "No neighbours of v_" << path.back() + << " at correct distance " << remaining_distance - 1 + << " to target vertex v_" << target_vertex); // GCOVR_EXCL_STOP const auto& choice = rng.get_element(candidate_moves); diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index 9b67ca2aae..0eb1fc18ea 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -40,7 +40,7 @@ void check_mapping( // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( work_mapping.count(entry.second) == 0, - "Vertices v_" << entry.first << " and v_" << work_mapping[entry.second] + std::stringstream() << "Vertices v_" << entry.first << " and v_" << work_mapping[entry.second] << " both have the same target vertex v_" << entry.second); // GCOVR_EXCL_STOP diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index b0a736b7b3..be7101f70b 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -164,10 +164,11 @@ void VectorListHybridSkeleton::erase_interval( // GCOVR_EXCL_START TKET_ASSERT_WITH_MESSAGE( last_element_index < m_links.size(), - "erase_interval with start index " - << index << ", number_of_elements=" << number_of_elements - << ", size " << m_links.size() << ", runs out of elements at N=" - << nn << " (got index " << last_element_index << ")"); + std::stringstream() + << "erase_interval with start index " << index + << ", number_of_elements=" << number_of_elements << ", size " + << m_links.size() << ", runs out of elements at N=" << nn + << " (got index " << last_element_index << ")"); // GCOVR_EXCL_STOP } TKET_ASSERT(number_of_elements <= m_size); diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index 16ec28d8d2..1175cdcd23 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -21,6 +21,7 @@ endif() add_library(tket-${COMP} TketLog.cpp UnitID.cpp + GetTketAssertMessage.cpp HelperFunctions.cpp MatrixAnalysis.cpp PauliStrings.cpp diff --git a/tket/src/Utils/GetTketAssertMessage.cpp b/tket/src/Utils/GetTketAssertMessage.cpp new file mode 100644 index 0000000000..fe58bc9fe4 --- /dev/null +++ b/tket/src/Utils/GetTketAssertMessage.cpp @@ -0,0 +1,19 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "GetTketAssertMessage.hpp" + +std::string get_tket_assert_message(const std::stringstream& ss) { + return ss.str(); +} diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index 66a465abb7..ec8c82b7ed 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -15,23 +15,28 @@ #pragma once #include -#include +#include "GetTketAssertMessage.hpp" #include "TketLog.hpp" /** * If `condition` is not satisfied, log a diagnostic message and abort, * including the extra message "msg". - * "msg" is passed directly to a stringstream, so you can write: + * "msg" could be an object, directly writable to a stringstream, + * so you could write: * - * TKET_ASSERT_WITH_MESSAGE(xcritical(ss.str()); \ - std::abort(); \ - } \ - } catch (const std::exception& ex) { \ - std::stringstream ss; \ - ss << "Evaluating assertion condition '" << #condition << "' (" \ - << __FILE__ << " : " << __func__ << " : " << __LINE__ \ - << ") threw unexpected exception: '" << ex.what() << "'. " << msg \ - << " Aborting."; \ - tket::tket_log()->critical(ss.str()); \ - std::abort(); \ - } catch (...) { \ - std::stringstream ss; \ - ss << "Evaluating assertion condition '" << #condition << "' (" \ - << __FILE__ << " : " << __func__ << " : " << __LINE__ \ - << ") Threw unknown exception. " << msg << " Aborting."; \ - tket::tket_log()->critical(ss.str()); \ - std::abort(); \ - } \ +#define TKET_ASSERT_WITH_MESSAGE(condition, msg) \ + do { \ + try { \ + if (!(condition)) { \ + std::stringstream ss; \ + ss << "Assertion '" << #condition << "' (" << __FILE__ << " : " \ + << __func__ << " : " << __LINE__ << ") failed. " \ + << get_tket_assert_message((msg)) << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ + } catch (const std::exception& ex) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") threw unexpected exception: '" << ex.what() << "'. " \ + << get_tket_assert_message((msg)) << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } catch (...) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") Threw unknown exception. " << get_tket_assert_message((msg)) \ + << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ } while (0) #define TKET_ASSERT(condition) \ From 042ab9b32ca15e55f54c32bea979513f7f462a90 Mon Sep 17 00:00:00 2001 From: Alec Edgington Date: Fri, 18 Feb 2022 16:00:53 +0000 Subject: [PATCH 126/146] Add note to README. --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 726d33f40a..221f6fb767 100644 --- a/README.md +++ b/README.md @@ -172,6 +172,10 @@ conan create --profile=tket recipes/tket-tests If you want to build them without running them, pass `--test-folder None` to the `conan` command. (You can still run them manually afterwards.) +Some tests (those that add significantly to the runtime) are not built by +default. To build all tests, add `-o tket-tests:full=True` to the above +`conan create` command. 
+ There is also a small set of property-based tests which you can build and run with: From d40c098ffce1a9f7922922a84edc2185076daa2d Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 16:01:27 +0000 Subject: [PATCH 127/146] rename HybridTSA_00 to HybridTsa --- tket/src/TokenSwapping/HybridTsa.cpp | 2 +- tket/tests/TokenSwapping/test_FullTsa.cpp | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tket/src/TokenSwapping/HybridTsa.cpp b/tket/src/TokenSwapping/HybridTsa.cpp index 304b606a78..b8b15c39a7 100644 --- a/tket/src/TokenSwapping/HybridTsa.cpp +++ b/tket/src/TokenSwapping/HybridTsa.cpp @@ -23,7 +23,7 @@ namespace tket { namespace tsa_internal { HybridTsa::HybridTsa() { - m_name = "HybridTSA_00"; + m_name = "HybridTsa"; m_trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); } diff --git a/tket/tests/TokenSwapping/test_FullTsa.cpp b/tket/tests/TokenSwapping/test_FullTsa.cpp index 369a31f948..645381da49 100644 --- a/tket/tests/TokenSwapping/test_FullTsa.cpp +++ b/tket/tests/TokenSwapping/test_FullTsa.cpp @@ -86,7 +86,7 @@ SCENARIO("Full TSA: stars") { } CHECK( tester.results.str() == - "[Stars:HybridTSA_00: 400 probs; 1978 toks; 1623 tot.lb]\n" + "[Stars:HybridTsa: 400 probs; 1978 toks; 1623 tot.lb]\n" "[Total swaps: 2632 2588 2550 2539 2539 2550]\n" "[Winners: joint: 360 381 392 400 400 392 undisputed: 0 0 0 0 0 0]"); @@ -125,7 +125,7 @@ SCENARIO("Full TSA: wheels") { } CHECK( tester.results.str() == - "[Wheels:HybridTSA_00: 400 probs; 1978 toks; 1533 tot.lb]\n" + "[Wheels:HybridTsa: 400 probs; 1978 toks; 1533 tot.lb]\n" "[Total swaps: 2482 2462 2430 2422 2422 2430]\n" "[Winners: joint: 374 384 395 400 400 395 undisputed: 0 0 0 0 0 0]"); @@ -160,7 +160,7 @@ SCENARIO("Full TSA: Rings") { // get_square_grid_edges). 
CHECK( tester.results.str() == - "[Rings:HybridTSA_00: 400 probs; 1802 toks; 3193 tot.lb]\n" + "[Rings:HybridTsa: 400 probs; 1802 toks; 3193 tot.lb]\n" "[Total swaps: 6302 5942 5118 5115 5113 5118]\n" "[Winners: joint: 292 328 399 399 400 399 undisputed: 0 0 0 0 1 0]"); @@ -208,7 +208,7 @@ SCENARIO("Full TSA: Square Grids") { CHECK( tester.results.str() == - "[Square grids:HybridTSA_00: 200 probs; 2746 toks; 4323 tot.lb]\n" + "[Square grids:HybridTsa: 200 probs; 2746 toks; 4323 tot.lb]\n" "[Total swaps: 7083 7015 6863 6846 6842 6863]\n" "[Winners: joint: 148 163 188 198 200 188 undisputed: 0 0 0 0 2 0]"); @@ -243,7 +243,7 @@ SCENARIO("Full TSA: Random trees") { } CHECK( tester.results.str() == - "[Trees:HybridTSA_00: 300 probs; 2158 toks; 2963 tot.lb]\n" + "[Trees:HybridTsa: 300 probs; 2158 toks; 2963 tot.lb]\n" "[Total swaps: 5216 5132 4844 4828 4817 4844]\n" "[Winners: joint: 227 251 286 296 300 286 undisputed: 0 0 0 0 4 0]"); From 080a87849fcc34be2717d20321a319d269142ec6 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 16:08:26 +0000 Subject: [PATCH 128/146] Replace TSGlobalTestParameters with #ifdef TKET_TESTS_FULL --- .../test_SwapSequenceReductions.cpp | 75 +++++++------ .../TestUtils/TSGlobalTestParameters.hpp | 37 ------- .../test_BestTsaFixedSwapSequences.cpp | 104 +++++++++--------- 3 files changed, 89 insertions(+), 127 deletions(-) delete mode 100644 tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp index d8a8cb5a59..de71d7086d 100644 --- a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp @@ -17,7 +17,6 @@ #include "../Data/FixedCompleteSolutions.hpp" #include "../Data/FixedSwapSequences.hpp" -#include "../TestUtils/TSGlobalTestParameters.hpp" #include "SwapSequenceReductionTester.hpp" using std::vector; @@ -51,6 +50,8 @@ static void check_final_messages( // Reduce the fixed swap sequences, with edge set implicitly defined // by the swaps themselves. SCENARIO("Fixed swap sequences reduction") { +#ifdef TKET_TESTS_FULL + // The long tests take ~5 seconds on a 2021 Windows laptop. vector expected_messages{ "[n=0, Full tokens: init segm optim? true]\n" "[478 equal probs (17115); 2 reduced probs (25 vs 29)]\n" @@ -68,29 +69,27 @@ SCENARIO("Fixed swap sequences reduction") { "[658 equal probs (12376); 238 reduced probs (12962 vs 13463)]\n" "[Overall reduction 25338 vs 25839: 1%]"}; - unsigned skip_number = 1; - if (!TSGlobalTestParameters().run_long_tests) { - // The long tests take ~5 seconds on a 2021 Windows laptop; - // the shorter tests take ~0.4 seconds. - skip_number = 20; - expected_messages = vector{ - - "[n=0, Full tokens: init segm optim? true]\n" - "[25 equal probs (846); 0 reduced probs (0 vs 0)]\n" - "[Overall reduction 846 vs 846: 0%]", + const unsigned skip_number = 1; +#else + // The shorter tests take ~0.4 seconds. + vector expected_messages{ + "[n=0, Full tokens: init segm optim? true]\n" + "[25 equal probs (846); 0 reduced probs (0 vs 0)]\n" + "[Overall reduction 846 vs 846: 0%]", - "[n=1, Partial tokens: init segm optim? true]\n" - "[46 equal probs (1348); 0 reduced probs (0 vs 0)]\n" - "[Overall reduction 1348 vs 1348: 0%]", + "[n=1, Partial tokens: init segm optim? 
true]\n" + "[46 equal probs (1348); 0 reduced probs (0 vs 0)]\n" + "[Overall reduction 1348 vs 1348: 0%]", - "[n=2, Full tokens: init segm optim? false]\n" - "[24 equal probs (822); 1 reduced probs (22 vs 24)]\n" - "[Overall reduction 844 vs 846: 0%]", + "[n=2, Full tokens: init segm optim? false]\n" + "[24 equal probs (822); 1 reduced probs (22 vs 24)]\n" + "[Overall reduction 844 vs 846: 0%]", - "[n=3, Partial tokens: init segm optim? false]\n" - "[34 equal probs (461); 12 reduced probs (844 vs 887)]\n" - "[Overall reduction 1305 vs 1348: 3%]"}; - } + "[n=3, Partial tokens: init segm optim? false]\n" + "[34 equal probs (461); 12 reduced probs (844 vs 887)]\n" + "[Overall reduction 1305 vs 1348: 3%]"}; + const unsigned skip_number = 20; +#endif const FixedSwapSequences fixed_sequences; SwapSequenceReductionTester tester; @@ -134,6 +133,8 @@ SCENARIO("Fixed swap sequences reduction") { // The actual problem input data: the graph may have extra edges // not present in the returned solution. SCENARIO("Fixed complete problems") { +#ifdef TKET_TESTS_FULL + // The long tests take ~10 seconds on a 2021 Windows laptop. vector expected_messages{ "[n=0, Small: init segm optim? false]\n" "[249 equal probs (1353); 29 reduced probs (163 vs 204)]\n" @@ -147,25 +148,23 @@ SCENARIO("Fixed complete problems") { "[164 equal probs (12771); 408 reduced probs (43946 vs 45894)]\n" "[Overall reduction 56717 vs 58665: 3%]"}; - unsigned skip_number = 1; - - if (!TSGlobalTestParameters().run_long_tests) { - // The long tests take ~10 seconds on a 2021 Windows laptop; - // the shorter tests take ~0.4 seconds. - skip_number = 20; - expected_messages = vector{ - "[n=0, Small: init segm optim? false]\n" - "[8 equal probs (48); 1 reduced probs (9 vs 10)]\n" - "[Overall reduction 57 vs 58: 1%]", + const unsigned skip_number = 1; +#else + // The shorter tests take ~0.4 seconds. + vector expected_messages{ + "[n=0, Small: init segm optim? false]\n" + "[8 equal probs (48); 1 reduced probs (9 vs 10)]\n" + "[Overall reduction 57 vs 58: 1%]", - "[n=1, Medium: init segm optim? false]\n" - "[8 equal probs (138); 1 reduced probs (23 vs 24)]\n" - "[Overall reduction 161 vs 162: 0%]", + "[n=1, Medium: init segm optim? false]\n" + "[8 equal probs (138); 1 reduced probs (23 vs 24)]\n" + "[Overall reduction 161 vs 162: 0%]", - "[n=2, Large: init segm optim? false]\n" - "[10 equal probs (928); 16 reduced probs (1657 vs 1743)]\n" - "[Overall reduction 2585 vs 2671: 3%]"}; - } + "[n=2, Large: init segm optim? false]\n" + "[10 equal probs (928); 16 reduced probs (1657 vs 1743)]\n" + "[Overall reduction 2585 vs 2671: 3%]"}; + const unsigned skip_number = 20; +#endif SwapSequenceReductionTester::Options options; options.optimise_initial_segment_only = false; diff --git a/tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp b/tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp deleted file mode 100644 index 9439b43170..0000000000 --- a/tket/tests/TokenSwapping/TestUtils/TSGlobalTestParameters.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -namespace tket { -namespace tsa_internal { -namespace tests { - -/** If we want to use the same adjustable parameters across all - * TokenSwapping tests simultaneously, put them here. - */ -struct TSGlobalTestParameters { - /** Running all the token swapping tests can take ~30 seconds - * on an ordinary laptop. Set this to false in order to test - * a smaller set. - */ - bool run_long_tests; - - // TSGlobalTestParameters() : run_long_tests(true) {} - TSGlobalTestParameters() : run_long_tests(false) {} -}; - -} // namespace tests -} // namespace tsa_internal -} // namespace tket diff --git a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp index be88c00b8e..1bcd0ca242 100644 --- a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp +++ b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp @@ -17,7 +17,6 @@ #include "Data/FixedCompleteSolutions.hpp" #include "Data/FixedSwapSequences.hpp" #include "TestUtils/BestTsaTester.hpp" -#include "TestUtils/TSGlobalTestParameters.hpp" /// TODO: The swap table optimiser currently tries to optimise many segments; /// certainly it could be cut down, experimentation is needed @@ -152,9 +151,12 @@ struct Summary { SCENARIO("Best TSA : solve problems from fixed swap sequences") { FixedSwapSequences sequences; - CHECK(sequences.full.size() == 453); - std::string full_seq_str = + CHECK(sequences.partial.size() == 755); + +#ifdef TKET_TESTS_FULL + // The "long" tests take ~6 seconds on an ordinary 2021 Windows laptop. + const std::string full_seq_str = "[248 equal (6088); 104 BETTER (4645 vs 4979): av 7% decr\n" "101 WORSE (5893 vs 5451): av 8% incr]"; @@ -163,33 +165,30 @@ SCENARIO("Best TSA : solve problems from fixed swap sequences") { // for sure without an exhaustive search; there is probably no known // non-exponential time algorithm to find the optimal solution). // So, (probably) getting within 1% of the optimal answer seems pretty good. - double full_seq_improvement = -0.653832; + const double full_seq_improvement = -0.653832; - CHECK(sequences.partial.size() == 755); - std::string partial_seq_str = + const std::string partial_seq_str = "[455 equal (6487); 165 BETTER (7044 vs 7457): av 7% decr\n" "135 WORSE (9124 vs 8604): av 6% incr]"; - double partial_seq_improvement = -0.474543; + const double partial_seq_improvement = -0.474543; +#else + // The reduced tests take ~50 milliseconds + // (and are also biased towards smaller problems, + // as the problem strings are sorted by length). 
+ sequences.full.resize(40); + const std::string full_seq_str = + "[40 equal (231); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]"; + const double full_seq_improvement = 0.0; + + sequences.partial.resize(40); + const std::string partial_seq_str = + "[40 equal (166); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]"; + const double partial_seq_improvement = 0.0; +#endif BestTsaTester tester; - - if (!TSGlobalTestParameters().run_long_tests) { - // The "long" tests take ~6 seconds on an ordinary 2021 Windows laptop. - // The reduced tests take ~50 milliseconds - // (and are also biased towards smaller problems, - // as the problem strings are sorted by length). - sequences.full.resize(40); - full_seq_str = - "[40 equal (231); 0 BETTER (0 vs 0): av 0% decr\n" - "0 WORSE (0 vs 0): av 0% incr]"; - full_seq_improvement = 0; - - sequences.partial.resize(40); - partial_seq_str = - "[40 equal (166); 0 BETTER (0 vs 0): av 0% decr\n" - "0 WORSE (0 vs 0): av 0% incr]"; - partial_seq_improvement = 0; - } const Summary full_seqs_summary(sequences.full, tester); CHECK(full_seqs_summary.total_number_of_problems == sequences.full.size()); CHECK(full_seqs_summary.str == full_seq_str); @@ -302,7 +301,10 @@ SCENARIO("Best TSA : solve complete problems") { // in the statistics. Thus we determine the different categories using length // of encoding string, which presumably roughly corresponds to "problem size" // and problem hardness. - vector expected_messages{ + +#ifdef TKET_TESTS_FULL + // The "long" tests take ~12 seconds on an ordinary 2021 Windows laptop. + const vector expected_messages{ "[210 equal (1018); 19 BETTER (84 vs 111): av 24% decr\n" "2 WORSE (19 vs 15): av 26% incr]", @@ -318,37 +320,35 @@ SCENARIO("Best TSA : solve complete problems") { "[8 equal (1470); 164 BETTER (25183 vs 27141): av 6% decr\n" "44 WORSE (8722 vs 8384): av 3% incr]"}; - double expected_improvement = 3.25087; - - if (!TSGlobalTestParameters().run_long_tests) { - // The "long" tests take ~12 seconds on an ordinary 2021 Windows laptop. - // The reduced tests take ~700 milliseconds. - for (auto& entry : complete_solutions.solutions) { - auto reduced_size = entry.second.size() / 10; - if (reduced_size < 4) { - reduced_size = 4; - } - if (reduced_size < entry.second.size()) { - entry.second.resize(reduced_size); - } + const double expected_improvement = 3.25087; +#else + // The reduced tests take ~700 milliseconds. 
+ for (auto& entry : complete_solutions.solutions) { + auto reduced_size = entry.second.size() / 10; + if (reduced_size < 4) { + reduced_size = 4; } - expected_messages = vector{ - "[18 equal (62); 0 BETTER (0 vs 0): av 0% decr\n" - "0 WORSE (0 vs 0): av 0% incr]", + if (reduced_size < entry.second.size()) { + entry.second.resize(reduced_size); + } + } + const vector expected_messages{ + "[18 equal (62); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", - "[17 equal (82); 0 BETTER (0 vs 0): av 0% decr\n" - "0 WORSE (0 vs 0): av 0% incr]", + "[17 equal (82); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", - "[12 equal (119); 2 BETTER (15 vs 18): av 16% decr\n" - "0 WORSE (0 vs 0): av 0% incr]", + "[12 equal (119); 2 BETTER (15 vs 18): av 16% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", - "[6 equal (149); 6 BETTER (164 vs 173): av 5% decr\n" - "4 WORSE (115 vs 110): av 5% incr]", + "[6 equal (149); 6 BETTER (164 vs 173): av 5% decr\n" + "4 WORSE (115 vs 110): av 5% incr]", - "[4 equal (163); 10 BETTER (535 vs 571): av 5% decr\n" - "5 WORSE (288 vs 273): av 5% incr]"}; - expected_improvement = 1.62791; - } + "[4 equal (163); 10 BETTER (535 vs 571): av 5% decr\n" + "5 WORSE (288 vs 273): av 5% incr]"}; + const double expected_improvement = 1.62791; +#endif vector problem_sizes; for (const auto& entry : complete_solutions.solutions) { From 61dde34f0cc40e0d09e9746e87a235536c886821 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 16:09:45 +0000 Subject: [PATCH 129/146] commit other forgotten files --- tket/src/Mapping/MappingManager.cpp | 2 +- .../include/Utils/GetTketAssertMessage.hpp | 26 +++++++++++++++++++ tket/tests/CMakeLists.txt | 1 - tket/tests/tkettestsfiles.cmake | 5 ++-- 4 files changed, 30 insertions(+), 4 deletions(-) create mode 100644 tket/src/Utils/include/Utils/GetTketAssertMessage.hpp diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 48dda61172..c7864d64c5 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -14,7 +14,7 @@ #include "Mapping/MappingManager.hpp" -#include "TokenSwappingWithArch/BestTsaWithArch.hpp" +#include "Architecture/BestTsaWithArch.hpp" namespace tket { diff --git a/tket/src/Utils/include/Utils/GetTketAssertMessage.hpp b/tket/src/Utils/include/Utils/GetTketAssertMessage.hpp new file mode 100644 index 0000000000..aa0f3db926 --- /dev/null +++ b/tket/src/Utils/include/Utils/GetTketAssertMessage.hpp @@ -0,0 +1,26 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include + +std::string get_tket_assert_message(const std::stringstream& ss); + +template +std::string get_tket_assert_message(const T& obj) { + std::stringstream ss; + ss << obj; + return ss.str(); +} diff --git a/tket/tests/CMakeLists.txt b/tket/tests/CMakeLists.txt index eaad06f9a2..4626c93a97 100644 --- a/tket/tests/CMakeLists.txt +++ b/tket/tests/CMakeLists.txt @@ -61,7 +61,6 @@ target_link_libraries(test_tket PRIVATE tket-Program tket-Placement tket-TokenSwapping - tket-TokenSwappingWithArch tket-Mapping tket-Simulation tket-Transformations diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 7df25c419d..79d42913aa 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -38,8 +38,9 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/Graphs/test_DirectedGraph.cpp ${TKET_TESTS_DIR}/Graphs/test_ArticulationPoints.cpp ${TKET_TESTS_DIR}/Graphs/test_TreeSearch.cpp - # NOTE: For testing, it is easier to combine TokenSwapping - # and TokenSwappingWithArch tests together. + # NOTE: For testing TokenSwapping, it is easier to make use of + # Architecture to set up test problems, rather than trying + # to separate TokenSwapping-without-Architecture tests. ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedCompleteSolutions.cpp ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedSwapSequences.cpp ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/NeighboursFromEdges.cpp From 6809f9657d9d780d90bfc941bb427f59a50d50d2 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 18:09:19 +0000 Subject: [PATCH 130/146] Remove GetTketAssertMessage and TKET_ASSERT_WITH_MESSAGE, go back to AssertMessage(). --- tket/src/Architecture/ArchitectureMapping.cpp | 42 ++++--- .../DistancesFromArchitecture.cpp | 18 +-- .../NeighboursFromArchitecture.cpp | 14 +-- tket/src/Gate/GateUnitarySparseMatrix.cpp | 10 +- tket/src/Graphs/AdjacencyData.cpp | 33 +++--- tket/src/Graphs/BruteForceColouring.cpp | 14 +-- tket/src/Graphs/ColouringPriority.cpp | 18 +-- tket/src/Graphs/GraphColouring.cpp | 19 ++- .../src/TokenSwapping/RiverFlowPathFinder.cpp | 24 ++-- .../TSAUtils/VertexMappingFunctions.cpp | 11 +- .../VectorListHybridSkeleton.cpp | 14 +-- tket/src/Utils/AssertMessage.cpp | 43 +++++++ tket/src/Utils/CMakeLists.txt | 2 +- tket/src/Utils/GetTketAssertMessage.cpp | 19 --- tket/src/Utils/include/Utils/Assert.hpp | 111 ++++++++++-------- .../src/Utils/include/Utils/AssertMessage.hpp | 59 ++++++++++ .../include/Utils/GetTketAssertMessage.hpp | 26 ---- 17 files changed, 272 insertions(+), 205 deletions(-) create mode 100644 tket/src/Utils/AssertMessage.cpp delete mode 100644 tket/src/Utils/GetTketAssertMessage.cpp create mode 100644 tket/src/Utils/include/Utils/AssertMessage.hpp delete mode 100644 tket/src/Utils/include/Utils/GetTketAssertMessage.hpp diff --git a/tket/src/Architecture/ArchitectureMapping.cpp b/tket/src/Architecture/ArchitectureMapping.cpp index e9970f974f..3e072d7f4e 100644 --- a/tket/src/Architecture/ArchitectureMapping.cpp +++ b/tket/src/Architecture/ArchitectureMapping.cpp @@ -34,11 +34,10 @@ ArchitectureMapping::ArchitectureMapping(const Architecture& arch) { const auto citer = m_node_to_vertex_mapping.find(node); // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - citer == m_node_to_vertex_mapping.cend(), - std::stringstream() - << "Duplicate node " << node.repr() << " at vertices " - << citer->second << ", " << ii); + TKET_ASSERT( + citer == m_node_to_vertex_mapping.cend() || + AssertMessage() << "Duplicate node " << node.repr() << " at vertices " + << 
citer->second << ", " << ii); // GCOVR_EXCL_STOP } m_node_to_vertex_mapping[node] = ii; @@ -71,20 +70,20 @@ ArchitectureMapping::ArchitectureMapping( // Check that the nodes agree with the architecture object. const auto uids = arch.nodes(); // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - uids.size() == m_vertex_to_node_mapping.size(), - std::stringstream() << "passed in " << edges.size() << " edges, giving " - << m_vertex_to_node_mapping.size() - << " vertices; but the architecture object has " - << uids.size() << " vertices"); + TKET_ASSERT( + uids.size() == m_vertex_to_node_mapping.size() || + AssertMessage() << "passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has " + << uids.size() << " vertices"); // GCOVR_EXCL_STOP for (const UnitID& uid : uids) { const Node node(uid); // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - m_node_to_vertex_mapping.count(node) != 0, - std::stringstream() + TKET_ASSERT( + m_node_to_vertex_mapping.count(node) != 0 || + AssertMessage() << "passed in " << edges.size() << " edges, giving " << m_vertex_to_node_mapping.size() << " vertices; but the architecture object has an unknown node " @@ -100,11 +99,10 @@ size_t ArchitectureMapping::number_of_vertices() const { const Node& ArchitectureMapping::get_node(size_t vertex) const { const auto num_vertices = number_of_vertices(); // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - vertex < num_vertices, std::stringstream() - << "invalid vertex " << vertex - << " (architecture only has " << num_vertices - << " vertices)"); + TKET_ASSERT( + vertex < num_vertices || AssertMessage() << "invalid vertex " << vertex + << " (architecture only has " + << num_vertices << " vertices)"); // GCOVR_EXCL_STOP return m_vertex_to_node_mapping[vertex]; @@ -113,9 +111,9 @@ const Node& ArchitectureMapping::get_node(size_t vertex) const { size_t ArchitectureMapping::get_vertex(const Node& node) const { const auto citer = m_node_to_vertex_mapping.find(node); // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - citer != m_node_to_vertex_mapping.cend(), - std::stringstream() << "node " << node.repr() << " has no vertex number"); + TKET_ASSERT( + citer != m_node_to_vertex_mapping.cend() || + AssertMessage() << "node " << node.repr() << " has no vertex number"); // GCOVR_EXCL_STOP return citer->second; } diff --git a/tket/src/Architecture/DistancesFromArchitecture.cpp b/tket/src/Architecture/DistancesFromArchitecture.cpp index 4900fffe2b..fb42a2aa21 100644 --- a/tket/src/Architecture/DistancesFromArchitecture.cpp +++ b/tket/src/Architecture/DistancesFromArchitecture.cpp @@ -74,15 +74,15 @@ size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { // different connected components. However, leave the check in, in case some // other bizarre error causes distance zero to be returned. // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - distance_entry > 0, - std::stringstream() - << "DistancesFromArchitecture: architecture has " << arch.n_nodes() - << " vertices, " << arch.n_connections() - << " edges; returned diameter " << arch.get_diameter() << " and d(" - << vertex1 << "," << vertex2 - << ")=0. " - "Is the graph connected?"); + TKET_ASSERT( + distance_entry > 0 || + AssertMessage() << "DistancesFromArchitecture: architecture has " + << arch.n_nodes() << " vertices, " + << arch.n_connections() << " edges; returned diameter " + << arch.get_diameter() << " and d(" << vertex1 << "," + << vertex2 + << ")=0. 
" + "Is the graph connected?"); // GCOVR_EXCL_STOP } return distance_entry; diff --git a/tket/src/Architecture/NeighboursFromArchitecture.cpp b/tket/src/Architecture/NeighboursFromArchitecture.cpp index 36573813c5..3210e47944 100644 --- a/tket/src/Architecture/NeighboursFromArchitecture.cpp +++ b/tket/src/Architecture/NeighboursFromArchitecture.cpp @@ -28,10 +28,10 @@ const std::vector& NeighboursFromArchitecture::operator()( size_t vertex) { const auto num_vertices = m_arch_mapping.number_of_vertices(); // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - vertex < num_vertices, - std::stringstream() << "get_neighbours: invalid vertex " << vertex - << " (only have " << num_vertices << " vertices)"); + TKET_ASSERT( + vertex < num_vertices || + AssertMessage() << "get_neighbours: invalid vertex " << vertex + << " (only have " << num_vertices << " vertices)"); // GCOVR_EXCL_STOP auto& neighbours = m_cached_neighbours[vertex]; if (!neighbours.empty()) { @@ -51,9 +51,9 @@ const std::vector& NeighboursFromArchitecture::operator()( for (const Node& node : neighbour_nodes) { const auto neighbour_vertex = m_arch_mapping.get_vertex(node); // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - neighbour_vertex != vertex, - std::stringstream() + TKET_ASSERT( + neighbour_vertex != vertex || + AssertMessage() << "get_neighbours: vertex " << vertex << " for node " << node.repr() << " has " << neighbour_nodes.size() << " neighbours, and lists itself as a neighbour (loops not " diff --git a/tket/src/Gate/GateUnitarySparseMatrix.cpp b/tket/src/Gate/GateUnitarySparseMatrix.cpp index 8146384ff5..233ad4bf0c 100644 --- a/tket/src/Gate/GateUnitarySparseMatrix.cpp +++ b/tket/src/Gate/GateUnitarySparseMatrix.cpp @@ -154,11 +154,11 @@ std::vector GateUnitarySparseMatrix::get_unitary_triplets( gate, primitive_type, abs_epsilon); } catch (const GateUnitaryMatrixError& e) { // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - false, std::stringstream() - << "Converting " << gate.get_name() - << " to sparse unitary, via adding controls to gate type " - << OpDesc(primitive_type).name() << ": " << e.what()); + TKET_ASSERT( + AssertMessage() + << "Converting " << gate.get_name() + << " to sparse unitary, via adding controls to gate type " + << OpDesc(primitive_type).name() << ": " << e.what()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index 25b1363e2a..8ed27845b2 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -66,9 +66,9 @@ string AdjacencyData::to_string() const { const set& AdjacencyData::get_neighbours( std::size_t vertex) const { // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - vertex < m_cleaned_data.size(), - std::stringstream() + TKET_ASSERT( + vertex < m_cleaned_data.size() || + AssertMessage() << "AdjacencyData: get_neighbours called with invalid vertex " << vertex << "; there are only " << m_cleaned_data.size() << " vertices"); @@ -104,11 +104,11 @@ bool AdjacencyData::add_edge(std::size_t i, std::size_t j) { bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - (i < m_cleaned_data.size() && j < m_cleaned_data.size()), - std::stringstream() << "edge_exists called with vertices " << i << ", " - << j << ", but there are only " - << m_cleaned_data.size() << " vertices"); + TKET_ASSERT( + (i < m_cleaned_data.size() && j < m_cleaned_data.size()) || + AssertMessage() << "edge_exists called with vertices " << i << ", " << j + << ", but there are only " << 
m_cleaned_data.size() + << " vertices"); // GCOVR_EXCL_STOP return m_cleaned_data[i].count(j) != 0; } @@ -148,15 +148,14 @@ AdjacencyData::AdjacencyData( for (std::size_t i = 0; i < m_cleaned_data.size(); ++i) { for (std::size_t j : raw_data[i]) { // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - i != j || allow_loops, - std::stringstream() << "Vertex " << i << " out of " - << m_cleaned_data.size() << " has a loop."); - TKET_ASSERT_WITH_MESSAGE( - j < m_cleaned_data.size(), - std::stringstream() - << "Vertex " << i << " has illegal neighbour vertex " << j - << ", the size is " << m_cleaned_data.size()); + TKET_ASSERT( + i != j || allow_loops || + AssertMessage() << "Vertex " << i << " out of " + << m_cleaned_data.size() << " has a loop."); + TKET_ASSERT( + j < m_cleaned_data.size() || + AssertMessage() << "Vertex " << i << " has illegal neighbour vertex " + << j << ", the size is " << m_cleaned_data.size()); // GCOVR_EXCL_STOP m_cleaned_data[i].insert(j); m_cleaned_data[j].insert(i); diff --git a/tket/src/Graphs/BruteForceColouring.cpp b/tket/src/Graphs/BruteForceColouring.cpp index 29701b3135..59c9a49bbf 100644 --- a/tket/src/Graphs/BruteForceColouring.cpp +++ b/tket/src/Graphs/BruteForceColouring.cpp @@ -216,13 +216,13 @@ BruteForceColouring::BruteForceColouring( throw std::runtime_error("suggested_number_of_colours hit number_of_nodes"); } catch (const std::exception& e) { // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - false, std::stringstream() << "initial_suggested_number_of_colours = " - << initial_suggested_number_of_colours - << ", reached suggested_number_of_colours = " - << suggested_number_of_colours << ", had " - << number_of_nodes << " nodes. Error: " - << e.what() << priority.print_raw_data()); + TKET_ASSERT( + AssertMessage() << "initial_suggested_number_of_colours = " + << initial_suggested_number_of_colours + << ", reached suggested_number_of_colours = " + << suggested_number_of_colours << ", had " + << number_of_nodes << " nodes. Error: " << e.what() + << priority.print_raw_data()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/ColouringPriority.cpp b/tket/src/Graphs/ColouringPriority.cpp index c60515c979..5dee5e62b0 100644 --- a/tket/src/Graphs/ColouringPriority.cpp +++ b/tket/src/Graphs/ColouringPriority.cpp @@ -88,15 +88,15 @@ static void fill_initial_node_sequence( // GCOVR_EXCL_STOP } catch (const std::exception& e) { // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - false, std::stringstream() - << "ColouringPriority: fill_initial_node_sequence: initial" - << " clique size " << initial_clique.size() << ", " - << vertices_in_component.size() << " vertices in" - << " this component (full graph has " - << adjacency_data.get_number_of_vertices() << " vertices)." - << " So far, filled " << nodes.size() << " nodes." - << " Error: " << e.what()); + TKET_ASSERT( + AssertMessage() + << "ColouringPriority: fill_initial_node_sequence: initial" + << " clique size " << initial_clique.size() << ", " + << vertices_in_component.size() << " vertices in" + << " this component (full graph has " + << adjacency_data.get_number_of_vertices() << " vertices)." + << " So far, filled " << nodes.size() << " nodes." 
+ << " Error: " << e.what()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/GraphColouring.cpp b/tket/src/Graphs/GraphColouring.cpp index a50707c1e0..b973a2c181 100644 --- a/tket/src/Graphs/GraphColouring.cpp +++ b/tket/src/Graphs/GraphColouring.cpp @@ -94,11 +94,10 @@ static void colour_single_component( } colour_to_assign = colour; } catch (const exception& e) { - TKET_ASSERT_WITH_MESSAGE( - false, stringstream() - << "colouring single component " << component_index - << " returned vertex " << vertex << " with colour " - << colour << " : " << e.what()); + TKET_ASSERT( + AssertMessage() << "colouring single component " << component_index + << " returned vertex " << vertex << " with colour " + << colour << " : " << e.what()); } // GCOVR_EXCL_STOP } @@ -171,11 +170,11 @@ GraphColouringResult GraphColouringRoutines::get_colouring( return result; } catch (const exception& e) { // GCOVR_EXCL_START - stringstream ss; - ss << "We had " << connected_components.size() << " connected components, " - << adjacency_data.get_number_of_vertices() - << " vertices in total: " << e.what(); - TKET_ASSERT_WITH_MESSAGE(false, ss.str()); + TKET_ASSERT( + AssertMessage() << "We had " << connected_components.size() + << " connected components, " + << adjacency_data.get_number_of_vertices() + << " vertices in total: " << e.what()); // GCOVR_EXCL_STOP } } diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp index 9ff42bd679..6c128069a9 100644 --- a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -115,22 +115,22 @@ void RiverFlowPathFinder::Impl::grow_path( continue; } // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( + TKET_ASSERT( neighbour_distance_to_target == remaining_distance || - neighbour_distance_to_target == remaining_distance + 1, - std::stringstream() << "d(v_" << path.back() << ", v_" << target_vertex - << ")=" << remaining_distance << ". But v_" - << path.back() << " has neighbour v_" << neighbour - << ", at distance " << neighbour_distance_to_target - << " to the target v_" << target_vertex); + neighbour_distance_to_target == remaining_distance + 1 || + AssertMessage() << "d(v_" << path.back() << ", v_" << target_vertex + << ")=" << remaining_distance << ". 
But v_" + << path.back() << " has neighbour v_" << neighbour + << ", at distance " << neighbour_distance_to_target + << " to the target v_" << target_vertex); // GCOVR_EXCL_STOP } // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - !candidate_moves.empty(), - std::stringstream() << "No neighbours of v_" << path.back() - << " at correct distance " << remaining_distance - 1 - << " to target vertex v_" << target_vertex); + TKET_ASSERT( + !candidate_moves.empty() || + AssertMessage() << "No neighbours of v_" << path.back() + << " at correct distance " << remaining_distance - 1 + << " to target vertex v_" << target_vertex); // GCOVR_EXCL_STOP const auto& choice = rng.get_element(candidate_moves); diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp index 0eb1fc18ea..18edbac349 100644 --- a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -38,11 +38,12 @@ void check_mapping( work_mapping.clear(); for (const auto& entry : vertex_mapping) { // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - work_mapping.count(entry.second) == 0, - std::stringstream() << "Vertices v_" << entry.first << " and v_" << work_mapping[entry.second] - << " both have the same target vertex v_" - << entry.second); + TKET_ASSERT( + work_mapping.count(entry.second) == 0 || + AssertMessage() << "Vertices v_" << entry.first << " and v_" + << work_mapping[entry.second] + << " both have the same target vertex v_" + << entry.second); // GCOVR_EXCL_STOP work_mapping[entry.second] = entry.first; } diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp index be7101f70b..940c3e2bbc 100644 --- a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -162,13 +162,13 @@ void VectorListHybridSkeleton::erase_interval( last_element_index = m_links.at(last_element_index).next; // GCOVR_EXCL_START - TKET_ASSERT_WITH_MESSAGE( - last_element_index < m_links.size(), - std::stringstream() - << "erase_interval with start index " << index - << ", number_of_elements=" << number_of_elements << ", size " - << m_links.size() << ", runs out of elements at N=" << nn - << " (got index " << last_element_index << ")"); + TKET_ASSERT( + last_element_index < m_links.size() || + AssertMessage() << "erase_interval with start index " << index + << ", number_of_elements=" << number_of_elements + << ", size " << m_links.size() + << ", runs out of elements at N=" << nn + << " (got index " << last_element_index << ")"); // GCOVR_EXCL_STOP } TKET_ASSERT(number_of_elements <= m_size); diff --git a/tket/src/Utils/AssertMessage.cpp b/tket/src/Utils/AssertMessage.cpp new file mode 100644 index 0000000000..4452dbaed7 --- /dev/null +++ b/tket/src/Utils/AssertMessage.cpp @@ -0,0 +1,43 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "AssertMessage.hpp" + +namespace tket { + +// GCOVR_EXCL_START +AssertMessage::AssertMessage() {} + +std::string& AssertMessage::get_error_message_ref() { + static std::string error_string; + return error_string; +} + +std::string AssertMessage::get_error_message() { + const std::string message = get_error_message_ref(); + // Asserts are SUPPOSED to lead to aborts, so clearing + // shouldn't be necessary; but anyway, in case it's + // called multiple times, clear ready for the next message. + get_error_message_ref().clear(); + return message; +} + +AssertMessage::operator bool() const { + // Store the built up error message. + get_error_message_ref() = m_ss.str(); + return false; +} +// GCOVR_EXCL_STOP + +} // namespace tket diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index 1175cdcd23..81da0d5f2c 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -21,7 +21,7 @@ endif() add_library(tket-${COMP} TketLog.cpp UnitID.cpp - GetTketAssertMessage.cpp + AssertMessage.cpp HelperFunctions.cpp MatrixAnalysis.cpp PauliStrings.cpp diff --git a/tket/src/Utils/GetTketAssertMessage.cpp b/tket/src/Utils/GetTketAssertMessage.cpp deleted file mode 100644 index fe58bc9fe4..0000000000 --- a/tket/src/Utils/GetTketAssertMessage.cpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "GetTketAssertMessage.hpp" - -std::string get_tket_assert_message(const std::stringstream& ss) { - return ss.str(); -} diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index ec8c82b7ed..ddc1342778 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -16,76 +16,89 @@ #include -#include "GetTketAssertMessage.hpp" +#include "AssertMessage.hpp" #include "TketLog.hpp" /** - * If `condition` is not satisfied, log a diagnostic message and abort, - * including the extra message "msg". - * "msg" could be an object, directly writable to a stringstream, - * so you could write: + * If `condition` is not satisfied, log a diagnostic message and abort. + * You can abort with a fixed string: * - * TKET_ASSERT_WITH_MESSAGE(xcritical(ss.str()); \ - std::abort(); \ - } \ - } catch (const std::exception& ex) { \ - std::stringstream ss; \ - ss << "Evaluating assertion condition '" << #condition << "' (" \ - << __FILE__ << " : " << __func__ << " : " << __LINE__ \ - << ") threw unexpected exception: '" << ex.what() << "'. " \ - << get_tket_assert_message((msg)) << " Aborting."; \ - tket::tket_log()->critical(ss.str()); \ - std::abort(); \ - } catch (...) { \ - std::stringstream ss; \ - ss << "Evaluating assertion condition '" << #condition << "' (" \ - << __FILE__ << " : " << __func__ << " : " << __LINE__ \ - << ") Threw unknown exception. 
" << get_tket_assert_message((msg)) \ - << " Aborting."; \ - tket::tket_log()->critical(ss.str()); \ - std::abort(); \ - } \ - } while (0) - -#define TKET_ASSERT(condition) \ - do { \ - TKET_ASSERT_WITH_MESSAGE(condition, ""); \ +#define TKET_ASSERT(condition) \ + do { \ + try { \ + if (!(condition)) { \ + std::stringstream ss; \ + ss << "Assertion '" << #condition << "' (" << __FILE__ << " : " \ + << __func__ << " : " << __LINE__ << ") failed. " \ + << AssertMessage::get_error_message() << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ + } catch (const std::exception& ex) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") threw unexpected exception: '" << ex.what() << "'. " \ + << AssertMessage::get_error_message() << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } catch (...) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") Threw unknown exception. " \ + << AssertMessage::get_error_message() << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ } while (0) diff --git a/tket/src/Utils/include/Utils/AssertMessage.hpp b/tket/src/Utils/include/Utils/AssertMessage.hpp new file mode 100644 index 0000000000..98b023eb57 --- /dev/null +++ b/tket/src/Utils/include/Utils/AssertMessage.hpp @@ -0,0 +1,59 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +namespace tket { + +// GCOVR_EXCL_START +/** This is only for use with TKET_ASSERT. + */ +class AssertMessage { + public: + /** Construct the object, to begin writing to the stream. */ + AssertMessage(); + + /** Always returns false, so that "|| AssertMessage() << a)" becomes + * "|| false)". + * Also, stores the error message for later use by TKET_ASSERT macros; + * previously this information was passed on by exceptions, but that + * generated lots of code coverage branching problems. */ + operator bool() const; + + /** Every streamable object x can be written to the stream. + * @param x Any object which can be written to a stringstream. + * @return This object, to allow chaining. + */ + template + AssertMessage& operator<<(const T& x) { + m_ss << x; + return *this; + } + + /** Get the stored error message. Of course, if AssertMessage() + * has not actually been called, just returns an empty string. + * Also, clears the stored message, ready for the next time. 
+ */ + static std::string get_error_message(); + + private: + std::stringstream m_ss; + + static std::string& get_error_message_ref(); +}; +// GCOVR_EXCL_STOP + +} // namespace tket diff --git a/tket/src/Utils/include/Utils/GetTketAssertMessage.hpp b/tket/src/Utils/include/Utils/GetTketAssertMessage.hpp deleted file mode 100644 index aa0f3db926..0000000000 --- a/tket/src/Utils/include/Utils/GetTketAssertMessage.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include - -std::string get_tket_assert_message(const std::stringstream& ss); - -template -std::string get_tket_assert_message(const T& obj) { - std::stringstream ss; - ss << obj; - return ss.str(); -} From 587780af05d9e8f1cbc3dc359a84620c479a979b Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 18:10:47 +0000 Subject: [PATCH 131/146] clang format --- tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp | 2 +- tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp | 6 +++--- tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp | 2 +- tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp | 4 ++-- tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp index 1137df8b7d..3d79e68475 100644 --- a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -16,9 +16,9 @@ #include +#include "Architecture/BestTsaWithArch.hpp" #include "TokenSwapping/VertexMappingFunctions.hpp" #include "TokenSwapping/VertexSwapResult.hpp" -#include "Architecture/BestTsaWithArch.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp index 9f0df5f882..617a2ebc96 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -16,13 +16,13 @@ #include +#include "Architecture/ArchitectureMapping.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" #include "DebugFunctions.hpp" #include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/VertexSwapResult.hpp" -#include "Architecture/ArchitectureMapping.hpp" -#include "Architecture/DistancesFromArchitecture.hpp" -#include "Architecture/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp index 709facaa98..3592b1c487 100644 --- a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -14,9 +14,9 @@ #pragma once +#include "Architecture/ArchitectureMapping.hpp" #include 
"TokenSwapping/PartialTsaInterface.hpp" #include "TokenSwapping/SwapListOptimiser.hpp" -#include "Architecture/ArchitectureMapping.hpp" #include "Utils/RNG.hpp" namespace tket { diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp index ac90f678c2..0a6764ceca 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp @@ -16,12 +16,12 @@ #include +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" #include "TestStatsStructs.hpp" #include "TokenSwapping/DistanceFunctions.hpp" #include "TokenSwapping/RiverFlowPathFinder.hpp" #include "TokenSwapping/VertexSwapResult.hpp" -#include "Architecture/DistancesFromArchitecture.hpp" -#include "Architecture/NeighboursFromArchitecture.hpp" using std::vector; diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp index c36f9a7f54..208b44d1f0 100644 --- a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -14,8 +14,8 @@ #pragma once -#include "TokenSwapping/PartialTsaInterface.hpp" #include "Architecture/ArchitectureMapping.hpp" +#include "TokenSwapping/PartialTsaInterface.hpp" #include "Utils/RNG.hpp" namespace tket { From c7a830a9bbb445f410d1bce7d2432c333c5ec7c3 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 18:14:16 +0000 Subject: [PATCH 132/146] Move test_Utils.cpp to test/Utils directory, rename to test_HelperFunctions.cpp --- tket/tests/{test_Utils.cpp => Utils/test_HelperFunctions.cpp} | 0 tket/tests/tkettestsfiles.cmake | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename tket/tests/{test_Utils.cpp => Utils/test_HelperFunctions.cpp} (100%) diff --git a/tket/tests/test_Utils.cpp b/tket/tests/Utils/test_HelperFunctions.cpp similarity index 100% rename from tket/tests/test_Utils.cpp rename to tket/tests/Utils/test_HelperFunctions.cpp diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 79d42913aa..963d2bbe8b 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -23,8 +23,9 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/tests_main.cpp ${TKET_TESTS_DIR}/testutil.cpp ${TKET_TESTS_DIR}/CircuitsForTesting.cpp - ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp ${TKET_TESTS_DIR}/Utils/test_CosSinDecomposition.cpp + ${TKET_TESTS_DIR}/Utils/test_HelperFunctions.cpp + ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp ${TKET_TESTS_DIR}/Utils/test_RNG.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp @@ -82,7 +83,6 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/Simulation/ComparisonFunctions.cpp ${TKET_TESTS_DIR}/Simulation/test_CircuitSimulator.cpp ${TKET_TESTS_DIR}/Simulation/test_PauliExpBoxUnitaryCalculator.cpp - ${TKET_TESTS_DIR}/test_Utils.cpp ${TKET_TESTS_DIR}/Circuit/test_Boxes.cpp ${TKET_TESTS_DIR}/Circuit/test_Circ.cpp ${TKET_TESTS_DIR}/Circuit/test_Symbolic.cpp From efeeca3efdf55ef85b1300016e1bc20b406bde98 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 18:25:35 +0000 Subject: [PATCH 133/146] fix "function does not return a value" error --- tket/src/Graphs/GraphColouring.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tket/src/Graphs/GraphColouring.cpp b/tket/src/Graphs/GraphColouring.cpp index b973a2c181..f73c537b59 100644 
--- a/tket/src/Graphs/GraphColouring.cpp +++ b/tket/src/Graphs/GraphColouring.cpp @@ -170,11 +170,13 @@ GraphColouringResult GraphColouringRoutines::get_colouring( return result; } catch (const exception& e) { // GCOVR_EXCL_START + // "false" to prevent error "non-void function does not return a value + // in all control paths" TKET_ASSERT( - AssertMessage() << "We had " << connected_components.size() - << " connected components, " - << adjacency_data.get_number_of_vertices() - << " vertices in total: " << e.what()); + false || AssertMessage() << "We had " << connected_components.size() + << " connected components, " + << adjacency_data.get_number_of_vertices() + << " vertices in total: " << e.what()); // GCOVR_EXCL_STOP } } From ac4d57cf0d94183cd2c92c6d21839e0be697b806 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 18:31:45 +0000 Subject: [PATCH 134/146] fix "non-void function does not return a value" error, attempt 2! --- tket/src/Graphs/GraphColouring.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tket/src/Graphs/GraphColouring.cpp b/tket/src/Graphs/GraphColouring.cpp index f73c537b59..11f3da98a4 100644 --- a/tket/src/Graphs/GraphColouring.cpp +++ b/tket/src/Graphs/GraphColouring.cpp @@ -170,13 +170,14 @@ GraphColouringResult GraphColouringRoutines::get_colouring( return result; } catch (const exception& e) { // GCOVR_EXCL_START - // "false" to prevent error "non-void function does not return a value - // in all control paths" TKET_ASSERT( - false || AssertMessage() << "We had " << connected_components.size() - << " connected components, " - << adjacency_data.get_number_of_vertices() - << " vertices in total: " << e.what()); + AssertMessage() << "We had " << connected_components.size() + << " connected components, " + << adjacency_data.get_number_of_vertices() + << " vertices in total: " << e.what()); + // Some compilers error with "non-void function does not + // return a value in all control paths..." + return GraphColouringResult(); // GCOVR_EXCL_STOP } } From b704bb1241704bb6a825b3e207040b3fa54a5cc9 Mon Sep 17 00:00:00 2001 From: Zen Harper Date: Fri, 18 Feb 2022 18:44:44 +0000 Subject: [PATCH 135/146] Forgot to remove final TokenSwappingWithArch bits --- pytket/setup.py | 1 - tket/src/Mapping/CMakeLists.txt | 1 - 2 files changed, 2 deletions(-) diff --git a/pytket/setup.py b/pytket/setup.py index f4b006157b..926f664882 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -126,7 +126,6 @@ def run(self): "tket-Characterisation", "tket-Converters", "tket-TokenSwapping", - "tket-TokenSwappingWithArch", "tket-Placement", "tket-Mapping", "tket-MeasurementSetup", diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index f885520e64..b7c6c74769 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -36,7 +36,6 @@ list(APPEND DEPS_${COMP} Ops OpType TokenSwapping - TokenSwappingWithArch Utils) foreach(DEP ${DEPS_${COMP}}) From db395d8432d40e1001ce68929f1e0063c5e702d3 Mon Sep 17 00:00:00 2001 From: Alec Edgington Date: Fri, 18 Feb 2022 20:53:46 +0000 Subject: [PATCH 136/146] Fix build failure. Puzzling. 
--- pytket/CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pytket/CMakeLists.txt b/pytket/CMakeLists.txt index 9ec0db9682..5e8ede5cbc 100644 --- a/pytket/CMakeLists.txt +++ b/pytket/CMakeLists.txt @@ -80,11 +80,6 @@ target_link_libraries(mapping PRIVATE tket-OpType tket-TokenSwapping tket-Utils) -target_link_libraries(mapping PRIVATE ${TKET_EXTRA_LIBS}) -if (WIN32) - # For boost::uuid: - target_link_libraries(mapping PRIVATE bcrypt) -endif() pybind11_add_module(transform binders/transform.cpp) target_include_directories(transform PRIVATE binders/include) From 39f1d86f17d0e310183542d8e8393ebc9b1b6ab8 Mon Sep 17 00:00:00 2001 From: Alec Edgington Date: Fri, 18 Feb 2022 20:57:19 +0000 Subject: [PATCH 137/146] Fix for mypy, --- pytket/tests/transform_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index 61368b3967..a7da886d5a 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -13,6 +13,7 @@ # limitations under the License. import itertools +from typing import List from pathlib import Path from pytket.circuit import Circuit, OpType, PauliExpBox, Node, Qubit # type: ignore from pytket._tket.circuit import _library # type: ignore From 05dda1fd24b407088442ca3fe3f7cf5cbd98cb73 Mon Sep 17 00:00:00 2001 From: cqc-melf <70640934+cqc-melf@users.noreply.github.com> Date: Sat, 19 Feb 2022 12:53:29 +0100 Subject: [PATCH 138/146] [fix] Feature/rv3.1 (#223) * try to fix problems * solve problem CMake * fix delay measure * format * format * format * add archtecture pointer * fix format * black format * fix mypy * fix doxygen * fix typo * fix mypy * fix black format * fix merge * fix merge * mypy fix * mypy * mypy --- kyriakos.py | 263 ++++++++++-------- tket/src/Circuit/include/Circuit/Circuit.hpp | 2 +- tket/src/Mapping/BoxDecomposition.cpp | 2 +- tket/src/Mapping/LexiLabelling.cpp | 2 +- tket/src/Mapping/LexiRoute.cpp | 2 +- tket/src/Mapping/MultiGateReorder.cpp | 2 +- tket/src/Mapping/RoutingMethodJson.cpp | 2 +- .../Mapping/include/Mapping/RoutingMethod.hpp | 2 +- tket/tests/test_BoxDecompRoutingMethod.cpp | 6 +- tket/tests/test_MultiGateReorder.cpp | 8 +- 10 files changed, 169 insertions(+), 122 deletions(-) diff --git a/kyriakos.py b/kyriakos.py index ec7927a55b..7a640eb1c8 100644 --- a/kyriakos.py +++ b/kyriakos.py @@ -1,113 +1,158 @@ - - from pytket import Circuit from pytket.predicates import CompilationUnit -circ_dict = {'bits': [['c', [0]], - ['c', [1]], - ['c', [2]], - ['c', [3]], - ['c', [4]], - ['c', [5]], - ['tk_SCRATCH_BIT', [0]], - ['tk_SCRATCH_BIT', [1]], - ['tk_SCRATCH_BIT', [2]]], - 'commands': [{'args': [['q', [1]], ['q', [3]]], 'op': {'type': 'CZ'}}, - {'args': [['q', [1]], ['q', [2]]], 'op': {'type': 'CZ'}}, - {'args': [['q', [4]], ['q', [3]]], 'op': {'type': 'CZ'}}, - {'args': [['q', [1]], ['q', [0]]], 'op': {'type': 'CZ'}}, - {'args': [['q', [0]], - ['q', [1]], - ['q', [2]], - ['q', [3]], - ['q', [4]], - ['q', [5]], - ['c', [0]], - ['c', [1]], - ['c', [2]], - ['c', [3]], - ['c', [4]], - ['c', [5]]], - 'op': {'signature': ['Q', - 'Q', - 'Q', - 'Q', - 'Q', - 'Q', - 'C', - 'C', - 'C', - 'C', - 'C', - 'C'], - 'type': 'Barrier'}}, - {'args': [['q', [0]]], 'op': {'type': 'H'}}, - {'args': [['q', [1]]], 'op': {'type': 'H'}}, - {'args': [['q', [0]], ['c', [0]]], 'op': {'type': 'Measure'}}, - {'args': [['q', [1]], ['c', [1]]], 'op': {'type': 'Measure'}}, - {'args': [['q', [0]], - ['q', [1]], - ['q', [2]], - ['q', [3]], - ['q', [4]], - ['c', [0]], - ['c', [1]], - 
['c', [2]], - ['c', [3]], - ['c', [4]]], - 'op': {'signature': ['Q', 'Q', 'Q', 'Q', 'Q', 'C', 'C', 'C', 'C', 'C'], - 'type': 'Barrier'}}, - {'args': [['q', [0]]], 'op': {'type': 'Reset'}}, - {'args': [['q', [1]]], 'op': {'type': 'Reset'}}, - {'args': [['q', [4]]], 'op': {'params': ['-0.25'], 'type': 'Rz'}}, - {'args': [['c', [1]], ['tk_SCRATCH_BIT', [0]]], - 'op': {'box': {'exp': {'args': [['c', [1]], False], 'op': 'BitWiseOp.XOR'}, - 'id': '12c10add-5033-437b-b911-f939f97203ed', - 'n_i': 1, - 'n_io': 0, - 'n_o': 1, - 'type': 'ClassicalExpBox'}, - 'type': 'ClassicalExpBox'}}, - {'args': [['c', [0]], ['tk_SCRATCH_BIT', [1]]], - 'op': {'box': {'exp': {'args': [['c', [0]], False], 'op': 'BitWiseOp.XOR'}, - 'id': '7d9e1fc7-dac1-4c52-8202-c480ef1897e0', - 'n_i': 1, - 'n_io': 0, - 'n_o': 1, - 'type': 'ClassicalExpBox'}, - 'type': 'ClassicalExpBox'}}, - {'args': [['c', [0]], ['tk_SCRATCH_BIT', [2]]], - 'op': {'box': {'exp': {'args': [['c', [0]], False], 'op': 'BitWiseOp.XOR'}, - 'id': '319a085c-42b6-4aa7-8348-cd588f6aa3f5', - 'n_i': 1, - 'n_io': 0, - 'n_o': 1, - 'type': 'ClassicalExpBox'}, - 'type': 'ClassicalExpBox'}}, - {'args': [['tk_SCRATCH_BIT', [0]], ['q', [2]]], - 'op': {'conditional': {'op': {'type': 'X'}, 'value': 1, 'width': 1}, - 'type': 'Conditional'}}, - {'args': [['tk_SCRATCH_BIT', [2]], ['q', [3]]], - 'op': {'conditional': {'op': {'type': 'Z'}, 'value': 1, 'width': 1}, - 'type': 'Conditional'}}, - {'args': [['tk_SCRATCH_BIT', [1]], ['q', [2]]], - 'op': {'conditional': {'op': {'type': 'Z'}, 'value': 1, 'width': 1}, - 'type': 'Conditional'}}, - {'args': [['q', [3]]], 'op': {'params': ['-0.25'], 'type': 'Rz'}}, - {'args': [['q', [2]]], 'op': {'params': ['-0.25'], 'type': 'Rz'}}], - 'implicit_permutation': [[['q', [0]], ['q', [0]]], - [['q', [1]], ['q', [1]]], - [['q', [2]], ['q', [2]]], - [['q', [3]], ['q', [3]]], - [['q', [4]], ['q', [4]]], - [['q', [5]], ['q', [5]]]], - 'phase': '0.0', - 'qubits': [['q', [0]], - ['q', [1]], - ['q', [2]], - ['q', [3]], - ['q', [4]], - ['q', [5]]]} +circ_dict = { + "bits": [ + ["c", [0]], + ["c", [1]], + ["c", [2]], + ["c", [3]], + ["c", [4]], + ["c", [5]], + ["tk_SCRATCH_BIT", [0]], + ["tk_SCRATCH_BIT", [1]], + ["tk_SCRATCH_BIT", [2]], + ], + "commands": [ + {"args": [["q", [1]], ["q", [3]]], "op": {"type": "CZ"}}, + {"args": [["q", [1]], ["q", [2]]], "op": {"type": "CZ"}}, + {"args": [["q", [4]], ["q", [3]]], "op": {"type": "CZ"}}, + {"args": [["q", [1]], ["q", [0]]], "op": {"type": "CZ"}}, + { + "args": [ + ["q", [0]], + ["q", [1]], + ["q", [2]], + ["q", [3]], + ["q", [4]], + ["q", [5]], + ["c", [0]], + ["c", [1]], + ["c", [2]], + ["c", [3]], + ["c", [4]], + ["c", [5]], + ], + "op": { + "signature": [ + "Q", + "Q", + "Q", + "Q", + "Q", + "Q", + "C", + "C", + "C", + "C", + "C", + "C", + ], + "type": "Barrier", + }, + }, + {"args": [["q", [0]]], "op": {"type": "H"}}, + {"args": [["q", [1]]], "op": {"type": "H"}}, + {"args": [["q", [0]], ["c", [0]]], "op": {"type": "Measure"}}, + {"args": [["q", [1]], ["c", [1]]], "op": {"type": "Measure"}}, + { + "args": [ + ["q", [0]], + ["q", [1]], + ["q", [2]], + ["q", [3]], + ["q", [4]], + ["c", [0]], + ["c", [1]], + ["c", [2]], + ["c", [3]], + ["c", [4]], + ], + "op": { + "signature": ["Q", "Q", "Q", "Q", "Q", "C", "C", "C", "C", "C"], + "type": "Barrier", + }, + }, + {"args": [["q", [0]]], "op": {"type": "Reset"}}, + {"args": [["q", [1]]], "op": {"type": "Reset"}}, + {"args": [["q", [4]]], "op": {"params": ["-0.25"], "type": "Rz"}}, + { + "args": [["c", [1]], ["tk_SCRATCH_BIT", [0]]], + "op": { + "box": { + 
"exp": {"args": [["c", [1]], False], "op": "BitWiseOp.XOR"}, + "id": "12c10add-5033-437b-b911-f939f97203ed", + "n_i": 1, + "n_io": 0, + "n_o": 1, + "type": "ClassicalExpBox", + }, + "type": "ClassicalExpBox", + }, + }, + { + "args": [["c", [0]], ["tk_SCRATCH_BIT", [1]]], + "op": { + "box": { + "exp": {"args": [["c", [0]], False], "op": "BitWiseOp.XOR"}, + "id": "7d9e1fc7-dac1-4c52-8202-c480ef1897e0", + "n_i": 1, + "n_io": 0, + "n_o": 1, + "type": "ClassicalExpBox", + }, + "type": "ClassicalExpBox", + }, + }, + { + "args": [["c", [0]], ["tk_SCRATCH_BIT", [2]]], + "op": { + "box": { + "exp": {"args": [["c", [0]], False], "op": "BitWiseOp.XOR"}, + "id": "319a085c-42b6-4aa7-8348-cd588f6aa3f5", + "n_i": 1, + "n_io": 0, + "n_o": 1, + "type": "ClassicalExpBox", + }, + "type": "ClassicalExpBox", + }, + }, + { + "args": [["tk_SCRATCH_BIT", [0]], ["q", [2]]], + "op": { + "conditional": {"op": {"type": "X"}, "value": 1, "width": 1}, + "type": "Conditional", + }, + }, + { + "args": [["tk_SCRATCH_BIT", [2]], ["q", [3]]], + "op": { + "conditional": {"op": {"type": "Z"}, "value": 1, "width": 1}, + "type": "Conditional", + }, + }, + { + "args": [["tk_SCRATCH_BIT", [1]], ["q", [2]]], + "op": { + "conditional": {"op": {"type": "Z"}, "value": 1, "width": 1}, + "type": "Conditional", + }, + }, + {"args": [["q", [3]]], "op": {"params": ["-0.25"], "type": "Rz"}}, + {"args": [["q", [2]]], "op": {"params": ["-0.25"], "type": "Rz"}}, + ], + "implicit_permutation": [ + [["q", [0]], ["q", [0]]], + [["q", [1]], ["q", [1]]], + [["q", [2]], ["q", [2]]], + [["q", [3]], ["q", [3]]], + [["q", [4]], ["q", [4]]], + [["q", [5]], ["q", [5]]], + ], + "phase": "0.0", + "qubits": [["q", [0]], ["q", [1]], ["q", [2]], ["q", [3]], ["q", [4]], ["q", [5]]], +} circ = Circuit.from_dict(circ_dict) @@ -118,5 +163,5 @@ from pytket.passes import FullMappingPass, RoutingPass, DefaultMappingPass from pytket.architecture import SquareGrid -DefaultMappingPass(SquareGrid(4,4)).apply(cu) -print(cu.circuit) \ No newline at end of file +DefaultMappingPass(SquareGrid(4, 4)).apply(cu) +print(cu.circuit) diff --git a/tket/src/Circuit/include/Circuit/Circuit.hpp b/tket/src/Circuit/include/Circuit/Circuit.hpp index dbd41d81c8..fc6ede8f71 100644 --- a/tket/src/Circuit/include/Circuit/Circuit.hpp +++ b/tket/src/Circuit/include/Circuit/Circuit.hpp @@ -1381,7 +1381,7 @@ class Circuit { Circuit conditional_circuit(const bit_vector_t &bits, unsigned value) const; /** - * Replaces one \ref vertex by applying \ref Box::to_circuit + * Replaces one vertex by applying \ref Box::to_circuit * * @return whether the vertex holds a box or a conditional box */ diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp index cd52143ece..4c72dd68ab 100644 --- a/tket/src/Mapping/BoxDecomposition.cpp +++ b/tket/src/Mapping/BoxDecomposition.cpp @@ -59,7 +59,7 @@ unit_map_t BoxDecompositionRoutingMethod::routing_method( nlohmann::json BoxDecompositionRoutingMethod::serialize() const { nlohmann::json j; - j["name"] = "BoxDecompositionRoutingMethod"; + j["name_of_method"] = "BoxDecompositionRoutingMethod"; return j; } diff --git a/tket/src/Mapping/LexiLabelling.cpp b/tket/src/Mapping/LexiLabelling.cpp index 1524c98aac..ad1214e620 100644 --- a/tket/src/Mapping/LexiLabelling.cpp +++ b/tket/src/Mapping/LexiLabelling.cpp @@ -54,7 +54,7 @@ unit_map_t LexiLabellingMethod::routing_method( nlohmann::json LexiLabellingMethod::serialize() const { nlohmann::json j; - j["name"] = "LexiLabellingMethod"; + j["name_of_method"] = "LexiLabellingMethod"; return 
j; } diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index ef49ae41e1..3939689bf3 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -561,7 +561,7 @@ unsigned LexiRouteRoutingMethod::get_max_depth() const { nlohmann::json LexiRouteRoutingMethod::serialize() const { nlohmann::json j; j["depth"] = this->get_max_depth(); - j["name"] = "LexiRouteRoutingMethod"; + j["name_of_method"] = "LexiRouteRoutingMethod"; return j; } diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 3e0cea53ff..8ebce94737 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -279,7 +279,7 @@ nlohmann::json MultiGateReorderRoutingMethod::serialize() const { nlohmann::json j; j["depth"] = this->max_depth_; j["size"] = this->max_size_; - j["name"] = "MultiGateReorderRoutingMethod"; + j["name_of_method"] = "MultiGateReorderRoutingMethod"; return j; } diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 86eac10524..5321a0093c 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -32,7 +32,7 @@ void to_json(nlohmann::json& j, const std::vector& rmp_v) { void from_json(const nlohmann::json& j, std::vector& rmp_v) { for (const auto& c : j) { - std::string name = c.at("name").get(); + std::string name = c.at("name_of_method").get(); if (name == "LexiLabellingMethod") { rmp_v.push_back(std::make_shared( LexiLabellingMethod::deserialize(c))); diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index 23041e2105..cb6c77e871 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -63,7 +63,7 @@ class RoutingMethod { virtual nlohmann::json serialize() const { nlohmann::json j; - j["name"] = "RoutingMethod"; + j["name_of_method"] = "RoutingMethod"; return j; } }; diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp index 41577f9034..a9ba3e05fb 100644 --- a/tket/tests/test_BoxDecompRoutingMethod.cpp +++ b/tket/tests/test_BoxDecompRoutingMethod.cpp @@ -114,7 +114,7 @@ SCENARIO("Decompose boxes") { SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { GIVEN("BoxDecompositionRoutingMethod") { nlohmann::json j_rm; - j_rm["name"] = "BoxDecompositionRoutingMethod"; + j_rm["name_of_method"] = "BoxDecompositionRoutingMethod"; BoxDecompositionRoutingMethod rm_loaded = BoxDecompositionRoutingMethod::deserialize(j_rm); nlohmann::json j_rm_serialised = rm_loaded.serialize(); @@ -123,9 +123,9 @@ SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { GIVEN("BoxDecompositionRoutingMethod vector") { nlohmann::json j_rms = { - {{"name", "BoxDecompositionRoutingMethod"}}, + {{"name_of_method", "BoxDecompositionRoutingMethod"}}, { - {"name", "LexiRouteRoutingMethod"}, + {"name_of_method", "LexiRouteRoutingMethod"}, {"depth", 3}, }}; std::vector rms = diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 2ade746ac2..5cf24d4b23 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -376,7 +376,7 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { SCENARIO("Test JSON serialisation for MultiGateReorderRoutingMethod") { GIVEN("MultiGateReorderRoutingMethod") { nlohmann::json j_rm; - j_rm["name"] = 
"MultiGateReorderRoutingMethod"; + j_rm["name_of_method"] = "MultiGateReorderRoutingMethod"; j_rm["depth"] = 3; j_rm["size"] = 4; MultiGateReorderRoutingMethod rm_loaded = @@ -387,9 +387,11 @@ SCENARIO("Test JSON serialisation for MultiGateReorderRoutingMethod") { GIVEN("RoutingMethod vector") { nlohmann::json j_rms = { - {{"name", "MultiGateReorderRoutingMethod"}, {"depth", 3}, {"size", 4}}, + {{"name_of_method", "MultiGateReorderRoutingMethod"}, + {"depth", 3}, + {"size", 4}}, { - {"name", "LexiRouteRoutingMethod"}, + {"name_of_method", "LexiRouteRoutingMethod"}, {"depth", 3}, }}; std::vector rms = From d58ffa630cc780b718b5bd0e91ec14d3d7630480 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 21 Feb 2022 14:34:40 +0000 Subject: [PATCH 139/146] name_of_method -> name --- tket/src/Mapping/BoxDecomposition.cpp | 2 +- tket/src/Mapping/LexiLabelling.cpp | 2 +- tket/tests/test_BoxDecompRoutingMethod.cpp | 6 +-- tket/tests/test_RoutingPasses.cpp | 59 +++++++++++----------- 4 files changed, 35 insertions(+), 34 deletions(-) diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp index 4c72dd68ab..cd52143ece 100644 --- a/tket/src/Mapping/BoxDecomposition.cpp +++ b/tket/src/Mapping/BoxDecomposition.cpp @@ -59,7 +59,7 @@ unit_map_t BoxDecompositionRoutingMethod::routing_method( nlohmann::json BoxDecompositionRoutingMethod::serialize() const { nlohmann::json j; - j["name_of_method"] = "BoxDecompositionRoutingMethod"; + j["name"] = "BoxDecompositionRoutingMethod"; return j; } diff --git a/tket/src/Mapping/LexiLabelling.cpp b/tket/src/Mapping/LexiLabelling.cpp index ad1214e620..1524c98aac 100644 --- a/tket/src/Mapping/LexiLabelling.cpp +++ b/tket/src/Mapping/LexiLabelling.cpp @@ -54,7 +54,7 @@ unit_map_t LexiLabellingMethod::routing_method( nlohmann::json LexiLabellingMethod::serialize() const { nlohmann::json j; - j["name_of_method"] = "LexiLabellingMethod"; + j["name"] = "LexiLabellingMethod"; return j; } diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp index a9ba3e05fb..41577f9034 100644 --- a/tket/tests/test_BoxDecompRoutingMethod.cpp +++ b/tket/tests/test_BoxDecompRoutingMethod.cpp @@ -114,7 +114,7 @@ SCENARIO("Decompose boxes") { SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { GIVEN("BoxDecompositionRoutingMethod") { nlohmann::json j_rm; - j_rm["name_of_method"] = "BoxDecompositionRoutingMethod"; + j_rm["name"] = "BoxDecompositionRoutingMethod"; BoxDecompositionRoutingMethod rm_loaded = BoxDecompositionRoutingMethod::deserialize(j_rm); nlohmann::json j_rm_serialised = rm_loaded.serialize(); @@ -123,9 +123,9 @@ SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { GIVEN("BoxDecompositionRoutingMethod vector") { nlohmann::json j_rms = { - {{"name_of_method", "BoxDecompositionRoutingMethod"}}, + {{"name", "BoxDecompositionRoutingMethod"}}, { - {"name_of_method", "LexiRouteRoutingMethod"}, + {"name", "LexiRouteRoutingMethod"}, {"depth", 3}, }}; std::vector rms = diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp index e200a1ec2c..642c0c1f33 100644 --- a/tket/tests/test_RoutingPasses.cpp +++ b/tket/tests/test_RoutingPasses.cpp @@ -436,36 +436,37 @@ SCENARIO( Transforms::decompose_BRIDGE_to_CX().apply(circ); REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); } - GIVEN( - "A large circuit, with a mixture of conditional CX and CX gates with " - "multiple classical wires, non conditional CX and, single qubit " - "gates, and a 
directed architecture.") { - SquareGrid arc(10, 4, 2); - Circuit circ(60, 10); - for (unsigned i = 0; i < 58; i++) { - circ.add_op(OpType::CX, {i, i + 1}); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); - circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); - circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); - circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); - } - MappingManager mm(std::make_shared(arc)); - REQUIRE(mm.route_circuit( - circ, {std::make_shared(), - std::make_shared()})); + // GIVEN( + // "A large circuit, with a mixture of conditional CX and CX gates with " + // "multiple classical wires, non conditional CX and, single qubit " + // "gates, and a directed architecture.") { + // SquareGrid arc(10, 4, 2); + // Circuit circ(60, 10); + // for (unsigned i = 0; i < 58; i++) { + // circ.add_op(OpType::CX, {i, i + 1}); + // circ.add_conditional_gate( + // OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); + // circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); + // circ.add_conditional_gate( + // OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); + // circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); + // circ.add_conditional_gate( + // OpType::CX, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); + // circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); + // } + // MappingManager mm(std::make_shared(arc)); + // REQUIRE(mm.route_circuit( + // circ, {std::make_shared(), + // std::make_shared()})); - Transforms::decompose_SWAP_to_CX().apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - Transforms::decompose_BRIDGE_to_CX().apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - Transforms::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true, true)); - } + // std::cout << "route "<< std::endl; + // Transforms::decompose_SWAP_to_CX().apply(circ); + // REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + // Transforms::decompose_BRIDGE_to_CX().apply(circ); + // REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + // Transforms::decompose_CX_directed(arc).apply(circ); + // REQUIRE(respects_connectivity_constraints(circ, arc, true, true)); + // } } SCENARIO( From ba96e50b6820013dbb090c2ee43f10681e5ce446 Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Fri, 25 Feb 2022 17:23:20 +0000 Subject: [PATCH 140/146] Merge develop into feature/RV3.1 (#255) * Merge `RoutingMethod::check_method` and `RoutingMethod::routing_method` (#244) * Update build_and_test.yml * Feature/TokenSwapping (#94) * Copy TokenSwapping CodeBase, update CMakeLists.txt * Add TokenSwapping tests * Update GraphTests to use TokenSwapping RNG * Remove "class RNG;" * Add cpp files to compilation * Add "MappingManager" class and port older routing solution (#95) * Copy code from private repository * Add binders for mapping module * Adding mapping module to setup.py * Add shared_ptr to Architecture subclasses in binder file * Port python test for mapping module * Add token swapping stage to routing v3 (#96) * Assert candidate swaps size (#108) * Add assertion that there are at least some swaps to trial * Pseudo code for Yao * Fix routing with measurements issue * Add classically controlled gates to lexiroute 
test Co-authored-by: sjdilkes * Update Architecture method names * get_all_nodes -> nodes * Update Compilation Passes to use RoutingV3 (#115) * Add token swapping stage, add test * Update compilation passes to use new routing * Add json serialization * Continue adding JSON serialisation for routing_config * Improve Json definitions * Update JSON Serialization and use of Barrier * Change from reference_wrapper to shared_ptr * Add JSON_DECL for std::vector * format routing_test * Fix up tests and binders for python * Uncoment measurement tests * rename method to merge_ancilla * debug proptest * Make add_qubit add qubit to unit_bimaps_ if not nullptr * Architectures -> Architecture * Install boost on MacOS. * comments to debug * update proptest to support ancillas properly * remove couts * format * Make Unitary dimensions match * add tket assert for comparison * Update test to check value * add_qubit -> add_ancilla * Remove kwargs formatting from argument * Rename Architecture Methods * rename architecture methods * Allow architecture mapping to take original edges, to calculate Node to size_t mapping * add get_square_grid_edges, to allow fixed tests independent of SquareGrid * use ArchitectureMapping and edges in most tests, instead of Architecture * trivial typos, comments, cmake update * add copyright notices, pragma once, remove semicolon typos * update binders for inheritance and docs * format * Remove NodeGraph * update formatting * Update CMakeLists and Setup.py * Use explicit shared_ptr * Refactor Routing module binder Make "FullMappingPass" use a kwargs based argument to get round faulty docs type definitions. * remove trailing whitespace * update clang formatting * reformat file * update orientation of BRIDGE gates * Update conf docs mapping, remove kwargs full mapping pass Co-authored-by: Alec Edgington Co-authored-by: Zen Harper * Remove outdated Routing code from repository (#165) * Add token swapping stage, add test * Update compilation passes to use new routing * Add json serialization * Continue adding JSON serialisation for routing_config * Improve Json definitions * Update JSON Serialization and use of Barrier * Change from reference_wrapper to shared_ptr * Add JSON_DECL for std::vector * format routing_test * Fix up tests and binders for python * Uncoment measurement tests * rename method to merge_ancilla * debug proptest * Make add_qubit add qubit to unit_bimaps_ if not nullptr * Architectures -> Architecture * Install boost on MacOS. * comments to debug * update proptest to support ancillas properly * remove couts * format * Make Unitary dimensions match * add tket assert for comparison * Update test to check value * add_qubit -> add_ancilla * Remove kwargs formatting from argument * Rename Architecture Methods * rename architecture methods * Allow architecture mapping to take original edges, to calculate Node to size_t mapping * add get_square_grid_edges, to allow fixed tests independent of SquareGrid * use ArchitectureMapping and edges in most tests, instead of Architecture * trivial typos, comments, cmake update * add copyright notices, pragma once, remove semicolon typos * update binders for inheritance and docs * format * Remove NodeGraph * update formatting * Update CMakeLists and Setup.py * Use explicit shared_ptr * Refactor Routing module binder Make "FullMappingPass" use a kwargs based argument to get round faulty docs type definitions. 
* remove trailing whitespace * update clang formatting * reformat file * update orientation of BRIDGE gates * Remove src/Routing Move Placement files into new src/Placement subdirectory, update tests as necessary, move connectivty constraint verification to utils * update tket/pytket to not install or use old pytket.routing * clang formatting * Update mitigation test to use explicit placement * remove binder file * Update conf docs mapping, remove kwargs full mapping pass * update docs for new python modules * Move Verification files to src/Mapping subdirectory * Delete test_Routing.cpp * Update imports for Verification.hpp * Add default argument to LexiRouteRoutingMethod binder Update python tests to use default * upadte Verification.cpp compilation pass * Formatting Co-authored-by: Alec Edgington Co-authored-by: Zen Harper * Improve LexiRoute.cpp coverage * Improve MappingFrontier.cpp test coverage * Rmoeve decmopose_module method * readd missing test * Update LexiRoute test coverage * Remove redundant comments * Feature/reorder multi qubit gates (#157) * Add token swapping stage, add test * Update compilation passes to use new routing * Add json serialization * Continue adding JSON serialisation for routing_config * Improve Json definitions * Update JSON Serialization and use of Barrier * Change from reference_wrapper to shared_ptr * Add JSON_DECL for std::vector * format routing_test * Fix up tests and binders for python * Uncoment measurement tests * rename method to merge_ancilla * debug proptest * Make add_qubit add qubit to unit_bimaps_ if not nullptr * Architectures -> Architecture * Install boost on MacOS. * comments to debug * update proptest to support ancillas properly * remove couts * format * Make Unitary dimensions match * add tket assert for comparison * Update test to check value * add_qubit -> add_ancilla * Remove kwargs formatting from argument * Rename Architecture Methods * rename architecture methods * Allow architecture mapping to take original edges, to calculate Node to size_t mapping * add get_square_grid_edges, to allow fixed tests independent of SquareGrid * use ArchitectureMapping and edges in most tests, instead of Architecture * trivial typos, comments, cmake update * add copyright notices, pragma once, remove semicolon typos * update binders for inheritance and docs * format * Remove NodeGraph * update formatting * Reorder CZ circuits * Revert "Reorder CZ circuits" This reverts commit 9d67720f2047fb6875c6c520f37fb28967b3752e. 
* Make two methods in MappingFrontier public * Add MultiGateReorderRoutingMethod * Remove unnecessary frontier advancement * Avoid copying the whole MappingFrontier * Remove the edge_in_frontier method * Add comment for vertex rewiring * Allow users to set search limits * Change default max depth/size limits to 10 * Fix using incorrect port colours * Obtain unitid by traversing to frontier instead of inputs * Add test for MultiGateReorderRoutingMethod * Refactor condition checks and rewire * Implement check_method * Add test for routing with LexiRoute Co-authored-by: sjdilkes Co-authored-by: Alec Edgington Co-authored-by: Zen Harper * modify TKET_ASSERT: allow extra messages, catch exceptions in evaluation * replace some exceptions with tket asserts; should be ignored by test coverage * Start moving files for modularisation * compilation refactor commit * Reorder base cmake * add assertmessage.cpp to compiilation * Rework TokenSwapping includes * clang format * update linking for pytket * reformat with black * Revert "reformat with black" This reverts commit 66ce863e16ab6e7ba50c739eb7c696c9afbf7ffc. * reformat setup.py * update proptests cmakelists * Feature/add serialisation for multi qubit reorder (#184) * Add JSON serialisation for MultiGateReorderRoutingMethod and getters * Add tests for JSON serialisation for MultiGateReorderRoutingMethod * Update test coverage for RoutingMethod serialization * make mapping_frontier from mapping_frontier * routing -> mapping * use TKET_ASSERT_WITH_THROW instead of TKET_ASSERT * Replace TKET_ASSERT with throw and use GCOVR_EXCL_START,STOP where appropriate * replace throws with TKET_ASSERT_WITH_THROW where appropriate * Add the TKET_ASSERT_WITH_THROW macro * add simple swap functions tests * add TKET_ASSERT_WITH_THROW tests, for detailed error messages * clang format * try to fix code coverage branching problems in TKET_ASSERT_WITH_THROW by hiding throws * correct "does not return a value" error * Infra/use bimap for quantum boundary (#185) * Add sequenced_bimap_t * Use sequenced_bimap_t for unit_vertport_frontier_t * Update MultiGateReorder * remove try/catch from tket assert with throw, to cut down branching * remove TKET_ASSERT_WITH_THROW, replace with TKET_ASSERT * Remove AssertMessage(), add TKET_ASSERT_WITH_MESSAGE * manually add coverage exclusion tags for now, until the branching problem is fixed * Add copyright information * [RV3] [refactor] Clean up of the dependencies of the modules (#199) * remove cycle from cmake lists * clean up * add comment * fix binder include * try to fix pytket build * try to fix problems * fix windows build * try to fix windows * add bimaps attribute to MappingFrontier * update_quantum_boundary_uids remaps bimaps entries * Update build_and_test.yml * Update changelog.rst * Update copyright dates 2021 -> 2022 * Update pytket/binders/mapping.cpp Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> * Update pytket/binders/mapping.cpp Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> * Update pytket/binders/mapping.cpp Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> * Corrections for PR * Remove tokenswapping from dependneices * Feature/decompose boxes in routing (#197) * Reject boxes in Architecture::valid_operation * Add `next_q_cut` method to a quantum cut Only consider quantum edges * Use `next_q_cut` in `advance_frontier_boundary` * Add BoxDecompositionRoutingMethod * Add tests * Reformat * Reject boxes in LexiRouteMethod::check_method * 
Update tests * Add JSON serialisation * Handle unused arguments * Refactor Circuit::decompose_boxes * fix naming Co-authored-by: sjdilkes * update compilation for tokenswapping * Revert "Feature/decompose boxes in routing (#197)" This reverts commit 86fb61e6f2922050a1ac438d1a8103ea6942a239. * Address PR Requested changes * change copyright to 2022 * remove unused code, including PathFinderInterface * rename HybridTsa00 -> HybridTsa; move files out of include directory * move DebugFunctions into tests * move get_swaps_lower_bound out of tket into tests * simple typos; unused code; extra comments, asserts * rename main_entry_functions -> SwapsFromQubitMapping; remove unused function * move RNG from token swapping to Utils; erase tests/Graphs/RNG * move get_random_set out of token swapping into test utils * move some stuff out of namespace tsa_internal into namespace tket * more cleanup * clang format * added OpType dependency to token swapping * clang format * replace throws with TKET_ASSERT_WITH_MESSAGE * move BruteForceColouring.hpp, ColouringPriority.hpp out of include directory * add TokenSwappingWithArch project; move files out of TokenSwapping * remove architecture from TokenSwapping * move DistancesInterface, NeighboursInterface out of namespace tsa_internal * update TokenSwapping tests to use TokenSwappingWithArch * bool type for delay_measures * Create architecture_test.py * Reduce test times: add TSGlobalTestParameters with run_long_tests option * add test_DebugFunctions.cpp and remove test coverage exclusion * Remove unreachable code. * update architecture binder * Revert "update architecture binder" This reverts commit d7bbd01adf9759adae0aabf1afd88a8678117f06. * formatting, remove nodegraph test * update architecture and mapping tests * add architecture_aware_synthesis_test.py * Create placement_test.py * Update range of python mapping tests * update routing test coverage * Cover Alec's comments * format transform_test * change length of line * remove trailing white space * name -> name_of_ethod * name -> name_of_method RoutingMethod * tokenswappingwitharch * add type ignores * Comment out check_method, rewrite types and methods * Update tests to not use check_method * update test_routingmethod * "name_of_method" -> "name' * Update valid_operation description * reformat transform_test and remove "had" * remove second auto_rebase_pass * Add missing import. * Add option to tket-tests conan build determining whether full tests are run. If tket-tests:full=True, the compiler flag TKET_TESTS_FULL is defined. * remove TokenSwappingWithArch project; move files into Architecture * Set "full tests" option on scheduled CI runs only. * change TKET_ASSERT_WITH_MESSAGE to have parentheses around message * Add note to README. * rename HybridTSA_00 to HybridTsa * Replace TSGlobalTestParameters with #ifdef TKET_TESTS_FULL * commit other forgotten files * Remove GetTketAssertMessage and TKET_ASSERT_WITH_MESSAGE, go back to AssertMessage(). * clang format * Move test_Utils.cpp to test/Utils directory, rename to test_HelperFunctions.cpp * fix "function does not return a value" error * fix "non-void function does not return a value" error, attempt 2! 
* Forgot to remove final TokenSwappingWithArch bits * update test_json * update python binders and tests remove struct, use 4 element tuple * remove commented out code, format * change type return of method for mypy * address PR Comments * use std::get Co-authored-by: yao-cqc <75305462+yao-cqc@users.noreply.github.com> Co-authored-by: Alec Edgington Co-authored-by: Zen Harper Co-authored-by: cqc-melf <70640934+cqc-melf@users.noreply.github.com> Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> Co-authored-by: melf * [bugfix] [infra] Fix issues revealed by valgrind; add valgrind check on CI. (#242) * Add StandardPass for basic qubit relabelling (#247) * add NaivePlacement Class and naive_placement_mapping_pass * clang format * PR Requested Changes * Add register accessors to Circuit (#246) * Add register accessors to Circuit * Add c_registers and q_registers methods * [TKET-597] MBQC primitives and Flow Analysis (#218) * Add MBQC generators for ZX * Causal and Pauli flow, verification, identification, focus * Focussed set identification * Fix fall through annotation * Add checks for is_graphlike in graphlike rewrites * Move header to include folder * Add files back into CMakeLists * Update binders with new generator hierarchy * Compiler warnings from uninitialised variables * Improve docstrings and test coverage * Fix compiler error on Windows CI * Add method to get all opgroup names (#249) * Add method to get all group names * Improve test * [infra] Don't invoke `setup.py` directly (#250) * [infra] separated test utils from tests (#251) * [feature] [rv3] Generate random nearby placements (#232) * Add new LexirRouteROutingMethod header and cpp files * add bool option to LexiRoute::set_interating_uids * update methods to merge check and route * Remove redundant line in Rebase.cpp (#253) * address pr comments (and failing test) * black formatting * update build_and_test .yml Co-authored-by: yao-cqc <75305462+yao-cqc@users.noreply.github.com> Co-authored-by: Alec Edgington Co-authored-by: Zen Harper Co-authored-by: cqc-melf <70640934+cqc-melf@users.noreply.github.com> Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> Co-authored-by: melf Co-authored-by: Will Simmons Co-authored-by: Luca Mondada <72734770+lmondada@users.noreply.github.com> --- .github/workflows/build_and_test.yml | 2 +- .github/workflows/build_macos_m1_wheel | 5 +- .github/workflows/build_macos_wheel | 7 +- .github/workflows/linuxbuildwheel | 4 +- .github/workflows/release.yml | 15 +- .github/workflows/valgrind.yml | 75 +++ kyriakos.py | 167 ----- pytket/binders/circuit/Circuit/main.cpp | 75 +++ pytket/binders/mapping.cpp | 19 +- pytket/binders/passes.cpp | 7 + pytket/binders/zx/diagram.cpp | 39 +- pytket/docs/changelog.rst | 4 + pytket/pyproject.toml | 7 +- pytket/pytket/mapping/__init__.py | 2 +- pytket/pytket/placement/__init__.py | 2 +- pytket/pytket/zx/tensor_eval.py | 4 +- pytket/setup.py | 30 +- pytket/tests/circuit_test.py | 44 ++ pytket/tests/mapping_test.py | 27 +- pytket/tests/predicates_test.py | 40 +- schemas/compiler_pass_v1.json | 14 + tket/src/Circuit/include/Circuit/Circuit.hpp | 5 + tket/src/Circuit/setters_and_getters.cpp | 11 + tket/src/Mapping/BoxDecomposition.cpp | 58 +- tket/src/Mapping/CMakeLists.txt | 1 + tket/src/Mapping/LexiLabelling.cpp | 34 +- tket/src/Mapping/LexiRoute.cpp | 131 ++-- tket/src/Mapping/LexiRouteRoutingMethod.cpp | 45 ++ tket/src/Mapping/MappingManager.cpp | 13 +- tket/src/Mapping/MultiGateReorder.cpp | 38 +- 
tket/src/Mapping/RoutingMethodCircuit.cpp | 41 +- tket/src/Mapping/RoutingMethodJson.cpp | 1 + .../include/Mapping/BoxDecomposition.hpp | 32 +- .../Mapping/include/Mapping/LexiLabelling.hpp | 12 +- .../src/Mapping/include/Mapping/LexiRoute.hpp | 57 +- .../Mapping/LexiRouteRoutingMethod.hpp | 60 ++ .../include/Mapping/MultiGateReorder.hpp | 16 +- .../Mapping/include/Mapping/RoutingMethod.hpp | 35 +- .../include/Mapping/RoutingMethodCircuit.hpp | 20 +- .../include/Mapping/RoutingMethodJson.hpp | 3 +- tket/src/PauliGraph/PauliGraph.cpp | 3 +- tket/src/Placement/CMakeLists.txt | 4 +- tket/src/Placement/NeighbourPlacements.cpp | 145 +++++ tket/src/Placement/Placement.cpp | 39 ++ .../include/Placement/NeighbourPlacements.hpp | 99 +++ .../Placement/include/Placement/Placement.hpp | 36 ++ tket/src/Predicates/CompilerPass.cpp | 3 + tket/src/Predicates/PassGenerators.cpp | 26 +- .../include/Predicates/CompilerPass.hpp | 7 + .../include/Predicates/PassGenerators.hpp | 2 + tket/src/Program/Program_iteration.cpp | 3 +- tket/src/TokenSwappingWithArch/CMakeLists.txt | 50 -- tket/src/Transformations/ControlledGates.cpp | 26 +- tket/src/Transformations/Rebase.cpp | 4 +- tket/src/Utils/include/Utils/UnitID.hpp | 1 + tket/src/ZX/CMakeLists.txt | 4 +- tket/src/ZX/Flow.cpp | 611 ++++++++++++++++++ tket/src/ZX/ZXDExpansions.cpp | 41 +- tket/src/ZX/ZXDFormats.cpp | 59 ++ tket/src/ZX/ZXDGettersSetters.cpp | 17 +- tket/src/ZX/ZXGenerator.cpp | 160 ++++- tket/src/ZX/ZXRWAxioms.cpp | 23 +- tket/src/ZX/ZXRWDecompositions.cpp | 4 +- tket/src/ZX/ZXRWGraphLikeForm.cpp | 2 +- tket/src/ZX/ZXRWGraphLikeSimplification.cpp | 20 +- tket/src/ZX/include/ZX/Flow.hpp | 98 +++ tket/src/ZX/include/ZX/ZXDiagram.hpp | 12 +- tket/src/ZX/include/ZX/ZXGenerator.hpp | 90 ++- tket/tests/CMakeLists.txt | 3 +- tket/tests/Circuit/test_Circ.cpp | 8 + .../Placement/test_NeighbourPlacements.cpp | 147 +++++ tket/tests/{ => Placement}/test_Placement.cpp | 73 ++- tket/tests/ZX/test_Flow.cpp | 337 ++++++++++ tket/tests/ZX/test_ZXDiagram.cpp | 17 +- tket/tests/test_BoxDecompRoutingMethod.cpp | 13 + tket/tests/test_CompilerPass.cpp | 3 +- tket/tests/test_LexiRoute.cpp | 60 +- tket/tests/test_MappingManager.cpp | 10 +- tket/tests/test_MappingVerification.cpp | 1 + tket/tests/test_MultiGateReorder.cpp | 18 +- tket/tests/test_RoutingMethod.cpp | 184 +++--- tket/tests/test_RoutingPasses.cpp | 4 +- tket/tests/test_json.cpp | 26 +- tket/tests/tkettestsfiles.cmake | 28 +- tket/tests/tkettestutilsfiles.cmake | 43 ++ 85 files changed, 2888 insertions(+), 878 deletions(-) create mode 100644 .github/workflows/valgrind.yml delete mode 100644 kyriakos.py create mode 100644 tket/src/Mapping/LexiRouteRoutingMethod.cpp create mode 100644 tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp create mode 100644 tket/src/Placement/NeighbourPlacements.cpp create mode 100644 tket/src/Placement/include/Placement/NeighbourPlacements.hpp delete mode 100644 tket/src/TokenSwappingWithArch/CMakeLists.txt create mode 100644 tket/src/ZX/Flow.cpp create mode 100644 tket/src/ZX/ZXDFormats.cpp create mode 100644 tket/src/ZX/include/ZX/Flow.hpp create mode 100644 tket/tests/Placement/test_NeighbourPlacements.cpp rename tket/tests/{ => Placement}/test_Placement.cpp (91%) create mode 100644 tket/tests/ZX/test_Flow.cpp create mode 100644 tket/tests/tkettestutilsfiles.cmake diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 104f353497..67f255b11f 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml 
@@ -5,7 +5,7 @@ on: branches: - main - develop - - feature/RV3.1 + - feature/RV3.1 push: branches: - develop diff --git a/.github/workflows/build_macos_m1_wheel b/.github/workflows/build_macos_m1_wheel index 27136ae12d..b0f1348c4c 100755 --- a/.github/workflows/build_macos_m1_wheel +++ b/.github/workflows/build_macos_m1_wheel @@ -16,9 +16,8 @@ set -evu -pip install conan delocate wheel cd $GITHUB_WORKSPACE/pytket export PYVER=`python -c 'import sys; print(".".join(map(str, sys.version_info[:3])))'` -python -m pip install -U pip setuptools_scm -python setup.py bdist_wheel -d "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" +python -m pip install -U pip build delocate +python -m build --outdir "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" delocate-wheel -v -w "$GITHUB_WORKSPACE/wheelhouse/${PYVER}/" "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}/pytket-"*".whl" diff --git a/.github/workflows/build_macos_wheel b/.github/workflows/build_macos_wheel index 95fa5c25ee..0e7e097175 100755 --- a/.github/workflows/build_macos_wheel +++ b/.github/workflows/build_macos_wheel @@ -16,9 +16,10 @@ set -evu -pip install conan delocate wheel cd $GITHUB_WORKSPACE/pytket export PYVER=`python -c 'import sys; print(".".join(map(str, sys.version_info[:3])))'` -python -m pip install -U pip setuptools_scm -python setup.py bdist_wheel -d "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" --plat-name=macosx_10_14_x86_64 +# Ensure wheels are compatible with MacOS 10.14 and later: +export WHEEL_PLAT_NAME=macosx_10_14_x86_64 +python -m pip install -U pip build delocate +python -m build --outdir "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" delocate-wheel -v -w "$GITHUB_WORKSPACE/wheelhouse/${PYVER}/" "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}/pytket-"*".whl" diff --git a/.github/workflows/linuxbuildwheel b/.github/workflows/linuxbuildwheel index a0a3f0fb64..5b65fe5f97 100755 --- a/.github/workflows/linuxbuildwheel +++ b/.github/workflows/linuxbuildwheel @@ -42,7 +42,7 @@ do cd /tket/pytket export PYEX=/opt/python/${pyX}/bin/python export PYVER=`${PYEX} -c 'import sys; print(".".join(map(str, sys.version_info[:3])))'` - ${PYEX} -m pip install -U pip setuptools_scm - ${PYEX} setup.py bdist_wheel -d "tmpwheel_${PYVER}" + ${PYEX} -m pip install -U pip build + ${PYEX} -m build --outdir "tmpwheel_${PYVER}" auditwheel repair "tmpwheel_${PYVER}/pytket-"*".whl" -w "audited/${PYVER}/" done diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9b8aeb2dba..10e5223f5d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -179,10 +179,9 @@ jobs: python-version: '3.8' - name: Build wheel (3.8) run: | - pip install wheel cd pytket - python -m pip install -U pip setuptools_scm - python setup.py bdist_wheel -d "${{ github.workspace }}/wheelhouse/3.8" + python -m pip install -U pip build + python -m build --outdir "${{ github.workspace }}/wheelhouse/3.8" - uses: actions/upload-artifact@v2 with: name: Windows_wheels @@ -193,10 +192,9 @@ jobs: python-version: '3.9' - name: Build wheel (3.9) run: | - pip install wheel cd pytket - python -m pip install -U pip setuptools_scm - python setup.py bdist_wheel -d "${{ github.workspace }}/wheelhouse/3.9" + python -m pip install -U pip build + python -m build --outdir "${{ github.workspace }}/wheelhouse/3.9" - uses: actions/upload-artifact@v2 with: name: Windows_wheels @@ -207,10 +205,9 @@ jobs: python-version: '3.10' - name: Build wheel (3.10) run: | - pip install wheel cd pytket - python -m pip install -U pip setuptools_scm - python setup.py bdist_wheel -d "${{ 
github.workspace }}/wheelhouse/3.10" + python -m pip install -U pip build + python -m build --outdir "${{ github.workspace }}/wheelhouse/3.10" - uses: actions/upload-artifact@v2 with: name: Windows_wheels diff --git a/.github/workflows/valgrind.yml b/.github/workflows/valgrind.yml new file mode 100644 index 0000000000..f53f3e580e --- /dev/null +++ b/.github/workflows/valgrind.yml @@ -0,0 +1,75 @@ +name: valgrind check +on: + pull_request: + branches: + - develop + schedule: + # 03:00 every Monday morning + - cron: '0 3 * * 1' +jobs: + changes: + runs-on: ubuntu-20.04 + outputs: + tket: ${{ steps.filter.outputs.tket }} + steps: + - uses: actions/checkout@v2 + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ github.ref }} + filters: | + tket: + - 'tket/**' + check: + runs-on: ubuntu-20.04 + needs: changes + if: needs.changes.outputs.tket == 'true' + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + - name: cache ccache data + uses: actions/cache@v2 + with: + path: ~/.ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: apt update + run: sudo apt update + - name: Install conan + id: conan + run: | + pip install conan + conan profile new tket --detect + conan profile update settings.compiler.libcxx=libstdc++11 tket + conan profile update options.tket:shared=True tket + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: install tex components + run: | + sudo apt install texlive texlive-latex-extra latexmk + mkdir -p ~/texmf/tex/latex + wget http://mirrors.ctan.org/graphics/pgf/contrib/quantikz/tikzlibraryquantikz.code.tex -P ~/texmf/tex/latex + - name: install valgrind + run: sudo apt install valgrind + - name: install ninja and ccache + run: sudo apt-get install ninja-build ccache + - name: build tket + run: | + conan install recipes/tket --install-folder=build/tket --profile=tket + conan build recipes/tket --configure --build-folder=build/tket --source-folder=tket/src + conan build recipes/tket --build --build-folder=build/tket + conan export-pkg recipes/tket -f --build-folder=build/tket --source-folder=tket/src + - name: build tket tests + run: | + conan install recipes/tket-tests --install-folder=build/tket-tests --profile=tket + conan build recipes/tket-tests --configure --build-folder=build/tket-tests --source-folder=tket/tests + conan build recipes/tket-tests --build --build-folder=build/tket-tests + - name: run tests under valgrind + run: valgrind --error-exitcode=1 ./build/tket-tests/bin/test_tket diff --git a/kyriakos.py b/kyriakos.py deleted file mode 100644 index 7a640eb1c8..0000000000 --- a/kyriakos.py +++ /dev/null @@ -1,167 +0,0 @@ -from pytket import Circuit -from pytket.predicates import CompilationUnit - -circ_dict = { - "bits": [ - ["c", [0]], - ["c", [1]], - ["c", [2]], - ["c", [3]], - ["c", [4]], - ["c", [5]], - ["tk_SCRATCH_BIT", [0]], - ["tk_SCRATCH_BIT", [1]], - ["tk_SCRATCH_BIT", [2]], - ], - "commands": [ - {"args": [["q", [1]], ["q", [3]]], "op": {"type": "CZ"}}, - {"args": [["q", [1]], ["q", [2]]], "op": {"type": "CZ"}}, - {"args": [["q", [4]], ["q", [3]]], "op": {"type": "CZ"}}, - {"args": [["q", [1]], ["q", [0]]], "op": {"type": "CZ"}}, - { - "args": [ - ["q", [0]], - ["q", [1]], - ["q", [2]], - 
["q", [3]], - ["q", [4]], - ["q", [5]], - ["c", [0]], - ["c", [1]], - ["c", [2]], - ["c", [3]], - ["c", [4]], - ["c", [5]], - ], - "op": { - "signature": [ - "Q", - "Q", - "Q", - "Q", - "Q", - "Q", - "C", - "C", - "C", - "C", - "C", - "C", - ], - "type": "Barrier", - }, - }, - {"args": [["q", [0]]], "op": {"type": "H"}}, - {"args": [["q", [1]]], "op": {"type": "H"}}, - {"args": [["q", [0]], ["c", [0]]], "op": {"type": "Measure"}}, - {"args": [["q", [1]], ["c", [1]]], "op": {"type": "Measure"}}, - { - "args": [ - ["q", [0]], - ["q", [1]], - ["q", [2]], - ["q", [3]], - ["q", [4]], - ["c", [0]], - ["c", [1]], - ["c", [2]], - ["c", [3]], - ["c", [4]], - ], - "op": { - "signature": ["Q", "Q", "Q", "Q", "Q", "C", "C", "C", "C", "C"], - "type": "Barrier", - }, - }, - {"args": [["q", [0]]], "op": {"type": "Reset"}}, - {"args": [["q", [1]]], "op": {"type": "Reset"}}, - {"args": [["q", [4]]], "op": {"params": ["-0.25"], "type": "Rz"}}, - { - "args": [["c", [1]], ["tk_SCRATCH_BIT", [0]]], - "op": { - "box": { - "exp": {"args": [["c", [1]], False], "op": "BitWiseOp.XOR"}, - "id": "12c10add-5033-437b-b911-f939f97203ed", - "n_i": 1, - "n_io": 0, - "n_o": 1, - "type": "ClassicalExpBox", - }, - "type": "ClassicalExpBox", - }, - }, - { - "args": [["c", [0]], ["tk_SCRATCH_BIT", [1]]], - "op": { - "box": { - "exp": {"args": [["c", [0]], False], "op": "BitWiseOp.XOR"}, - "id": "7d9e1fc7-dac1-4c52-8202-c480ef1897e0", - "n_i": 1, - "n_io": 0, - "n_o": 1, - "type": "ClassicalExpBox", - }, - "type": "ClassicalExpBox", - }, - }, - { - "args": [["c", [0]], ["tk_SCRATCH_BIT", [2]]], - "op": { - "box": { - "exp": {"args": [["c", [0]], False], "op": "BitWiseOp.XOR"}, - "id": "319a085c-42b6-4aa7-8348-cd588f6aa3f5", - "n_i": 1, - "n_io": 0, - "n_o": 1, - "type": "ClassicalExpBox", - }, - "type": "ClassicalExpBox", - }, - }, - { - "args": [["tk_SCRATCH_BIT", [0]], ["q", [2]]], - "op": { - "conditional": {"op": {"type": "X"}, "value": 1, "width": 1}, - "type": "Conditional", - }, - }, - { - "args": [["tk_SCRATCH_BIT", [2]], ["q", [3]]], - "op": { - "conditional": {"op": {"type": "Z"}, "value": 1, "width": 1}, - "type": "Conditional", - }, - }, - { - "args": [["tk_SCRATCH_BIT", [1]], ["q", [2]]], - "op": { - "conditional": {"op": {"type": "Z"}, "value": 1, "width": 1}, - "type": "Conditional", - }, - }, - {"args": [["q", [3]]], "op": {"params": ["-0.25"], "type": "Rz"}}, - {"args": [["q", [2]]], "op": {"params": ["-0.25"], "type": "Rz"}}, - ], - "implicit_permutation": [ - [["q", [0]], ["q", [0]]], - [["q", [1]], ["q", [1]]], - [["q", [2]], ["q", [2]]], - [["q", [3]], ["q", [3]]], - [["q", [4]], ["q", [4]]], - [["q", [5]], ["q", [5]]], - ], - "phase": "0.0", - "qubits": [["q", [0]], ["q", [1]], ["q", [2]], ["q", [3]], ["q", [4]], ["q", [5]]], -} - - -circ = Circuit.from_dict(circ_dict) - -cu = CompilationUnit(circ) -print(cu) - -from pytket.passes import FullMappingPass, RoutingPass, DefaultMappingPass -from pytket.architecture import SquareGrid - -DefaultMappingPass(SquareGrid(4, 4)).apply(cu) -print(cu.circuit) diff --git a/pytket/binders/circuit/Circuit/main.cpp b/pytket/binders/circuit/Circuit/main.cpp index a9072e7ffa..3d12e2d523 100644 --- a/pytket/binders/circuit/Circuit/main.cpp +++ b/pytket/binders/circuit/Circuit/main.cpp @@ -211,6 +211,78 @@ void init_circuit(py::module &m) { "Adds BitRegister to Circuit" "\n\n:param register: BitRegister ", py::arg("register")) + .def( + "get_c_register", + [](Circuit &circ, const std::string &name) { + register_t reg = circ.get_reg(name); + if (reg.size() == 0 || + 
reg.begin()->second.type() != UnitType::Bit) { + throw CircuitInvalidity( + "Cannot find classical register with name \"" + name + "\"."); + } + return BitRegister(name, reg.size()); + }, + "Get the classical register with the given name.\n\n:param name: " + "name for the register\n:return: the retrieved " + ":py:class:`BitRegister`", + py::arg("name")) + .def_property_readonly( + "c_registers", + [](Circuit &circ) { + bit_vector_t all_bits = circ.all_bits(); + std::map bits_map; + std::vector b_regs; + for (Bit bit : all_bits) { + auto it = bits_map.find(bit.reg_name()); + if (it == bits_map.end()) { + bits_map.insert({bit.reg_name(), 1}); + } else { + it->second++; + } + } + for (auto const &it : bits_map) { + b_regs.push_back(BitRegister(it.first, it.second)); + } + return b_regs; + }, + "Get all classical registers.\n\n:return: List of " + ":py:class:`BitRegister`") + .def( + "get_q_register", + [](Circuit &circ, const std::string &name) { + register_t reg = circ.get_reg(name); + if (reg.size() == 0 || + reg.begin()->second.type() != UnitType::Qubit) { + throw CircuitInvalidity( + "Cannot find quantum register with name \"" + name + "\"."); + } + return QubitRegister(name, reg.size()); + }, + "Get the quantum register with the given name.\n\n:param name: " + "name for the register\n:return: the retrieved " + ":py:class:`QubitRegister`", + py::arg("name")) + .def_property_readonly( + "q_registers", + [](Circuit &circ) { + qubit_vector_t all_qbs = circ.all_qubits(); + std::map qbs_map; + std::vector q_regs; + for (Qubit qb : all_qbs) { + auto it = qbs_map.find(qb.reg_name()); + if (it == qbs_map.end()) { + qbs_map.insert({qb.reg_name(), 1}); + } else { + it->second++; + } + } + for (auto const &it : qbs_map) { + q_regs.push_back(QubitRegister(it.first, it.second)); + } + return q_regs; + }, + "Get all quantum registers.\n\n:return: List of " + ":py:class:`QubitRegister`") .def( "add_qubit", &Circuit::add_qubit, "Constructs a single qubit with the given id.\n\n:param id: " @@ -247,6 +319,9 @@ void init_circuit(py::module &m) { "A qubit will feature in this map if it is " "measured and neither it nor the bit containing the " "measurement result is subsequently acted on") + .def_property_readonly( + "opgroups", &Circuit::get_opgroups, + "A set of all opgroup names in the circuit") .def( "flatten_registers", &Circuit::flatten_registers, "Combines all qubits into a single register namespace with " diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp index a7c947298a..d202f1d2bd 100644 --- a/pytket/binders/mapping.cpp +++ b/pytket/binders/mapping.cpp @@ -45,24 +45,23 @@ PYBIND11_MODULE(mapping, m) { "whole circuits.") .def( py::init< - const std::function( - const Circuit&, const ArchitecturePtr&)>&, - const std::function, + const std::function< + std::tuple( + const Circuit&, const ArchitecturePtr&)>&, unsigned, unsigned>(), "Constructor for a routing method defined by partially routing " "subcircuits.\n\n:param route_subcircuit: A function declaration " "that given a Circuit and Architecture object, returns a tuple " - "containing a new modified circuit, the initial logical to physical " + "containing a bool informing MappingManager whether to substitute " + "the returned circuit into the circuit being routed, " + "a new modified circuit, the initial logical to physical " "qubit mapping of the modified circuit and the permutation of " "logical to physical qubit mapping given operations in the " - "modified circuit\n:param check_subcircuit: A function declaration " - "that 
given a Circuit and Architecture object, returns a bool " - "stating whether the given method can modify the " - "given circuit\n:param max_size: The maximum number of gates " + "modified circuit\n:param max_size: The maximum number of gates " "permitted in a subcircuit\n:param max_depth: The maximum permitted " "depth of a subcircuit.", - py::arg("route_subcircuit"), py::arg("check_subcircuit"), - py::arg("max_size"), py::arg("max_depth")); + py::arg("route_subcircuit"), py::arg("max_size"), + py::arg("max_depth")); py::class_< LexiRouteRoutingMethod, std::shared_ptr, diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index 93572a3aa8..5762faad01 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -500,6 +500,13 @@ PYBIND11_MODULE(passes, m) { ":py:class:`Architecture` Nodes", py::arg("placer")); + m.def( + "NaivePlacementPass", &gen_naive_placement_pass, + ":param architecture: The Architecture used for relabelling." + "\n:return: a pass to relabel :py:class:`Circuit` Qubits to " + ":py:class:`Architecture` Nodes", + py::arg("arc")); + m.def( "RenameQubitsPass", &gen_rename_qubits_pass, "Rename some or all qubits.", "\n\n:param qubit_map: map from old to new qubit names", diff --git a/pytket/binders/zx/diagram.cpp b/pytket/binders/zx/diagram.cpp index c834e018a1..57fccad0c6 100644 --- a/pytket/binders/zx/diagram.cpp +++ b/pytket/binders/zx/diagram.cpp @@ -430,6 +430,31 @@ PYBIND11_MODULE(zx, m) { "value. Can either be Quantum or Classical - Quantum spiders can " "only have Quantum wires, Quantum wires on Classical spiders act as " "two wires. Can have arbitrary degree. No ports.") + .value( + "XY", ZXType::XY, + "A (postselected) XY qubit in MBQC. Corresponds to a Z spider with " + "negative phase.") + .value( + "XZ", ZXType::XZ, + "A (postselected) XZ qubit in MBQC. Corresponds to a 0.5-phase " + "(n+1)-ary Z spider connected to a phaseful 1-ary X spider.") + .value( + "YZ", ZXType::YZ, + "A (postselected) YZ qubit in MBQC. Corresponds to a 0-phase " + "(n+1)-ary Z spider connected to a phaseful 1-ary X spider.") + .value( + "PX", ZXType::PX, + "A (postselected) Pauli X qubit in MBQC. Corresponds to a Z spider " + "with phase either 0 (param=False) or 1 (param=True).") + .value( + "PY", ZXType::PY, + "A (postselected) Pauli Y qubit in MBQC. Corresponds to a Z spider " + "with phase either -0.5 (param=False) or +0.5 (param=True).") + .value( + "PZ", ZXType::PZ, + "A (postselected) Pauli Z qubit in MBQC. Corresponds to a 0-phase " + "(n+1)-ary Z spider connected to a 1-ary X spider with phase either " + "0 (param=False) or 1 (param=True).") .value( "Triangle", ZXType::Triangle, "A Triangle operator, [[1, 1], [0, 1]]. 
Can either be Quantum or " @@ -508,12 +533,18 @@ PYBIND11_MODULE(zx, m) { "The :py:class:`QuantumType` of the generator (if applicable).") .def("__eq__", &ZXGen::operator==) .def("__repr__", [](const ZXGen& gen) { return gen.get_name(); }); - py::class_, ZXGen>( - m, "BasicGen", + py::class_, ZXGen>( + m, "PhasedGen", + "Specialisation of :py:class:`ZXGen` for arbitrary-arity, symmetric " + "generators with a single continuous parameter.") + .def_property_readonly( + "param", &PhasedGen::get_param, "The parameter of the generator."); + py::class_, ZXGen>( + m, "CliffordGen", "Specialisation of :py:class:`ZXGen` for arbitrary-arity, symmetric " - "generators.") + "Clifford generators with a single boolean parameter.") .def_property_readonly( - "param", &BasicGen::get_param, "The parameter of the generator."); + "param", &CliffordGen::get_param, "The parameter of the generator."); py::class_, ZXGen>( m, "DirectedGen", "Specialisation of :py:class:`ZXGen` for asymmetric ZX generators which " diff --git a/pytket/docs/changelog.rst b/pytket/docs/changelog.rst index cd2c5079da..7a210b9b86 100644 --- a/pytket/docs/changelog.rst +++ b/pytket/docs/changelog.rst @@ -31,6 +31,10 @@ Minor new features: * New ``pytket.passes.auto_rebase_pass`` and ``pytket.passes.auto_squash_pass`` which attempt to construct rebase and squash passess given a target gate set from known decompositions. +* Add ``get_c_register``, ``get_q_register``, ``c_registers`` and ``q_registers`` methods to ``Circuit``. +* New ``pytket.passes.NaivePlacementPass`` which completes a basic relabelling of all Circuit Qubit + not labelled as some Architecture Node to any available Architecture Node +* Add ``opgroups`` property to ``Circuit``. 0.19.2 (February 2022) ---------------------- diff --git a/pytket/pyproject.toml b/pytket/pyproject.toml index dd0574d68c..ccc3aa56df 100644 --- a/pytket/pyproject.toml +++ b/pytket/pyproject.toml @@ -1,8 +1,3 @@ [build-system] -requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2", "conan"] +requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.4", "conan"] build-backend = "setuptools.build_meta" - -[tool.setuptools_scm] -root = ".." 
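To make the changelog entries above concrete, here is a minimal usage sketch of the new register and opgroup accessors, mirroring the additions to circuit_test.py later in this patch. The register name "flags" and opgroup name "entangler" are illustrative only.

```python
from pytket.circuit import Circuit, BitRegister, QubitRegister

c = Circuit(2, 1)                # default registers "q" (2 qubits) and "c" (1 bit)
c.add_c_register("flags", 3)     # a second, named classical register
c.CX(0, 1, opgroup="entangler")  # tag the gate with an opgroup name

assert c.get_q_register("q") == QubitRegister("q", 2)
assert c.get_c_register("flags") == BitRegister("flags", 3)
assert {r.name for r in c.c_registers} == {"c", "flags"}
assert c.opgroups == {"entangler"}
```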
-write_to = "pytket/pytket/_version.py" -write_to_template = '__version__ = "{version}"' diff --git a/pytket/pytket/mapping/__init__.py b/pytket/pytket/mapping/__init__.py index d932378d5d..a413b474b3 100644 --- a/pytket/pytket/mapping/__init__.py +++ b/pytket/pytket/mapping/__init__.py @@ -17,4 +17,4 @@ mapping logical circuits to physical circuits and for defining custom routing solutions.""" -from pytket._tket.mapping import * # type: ignore \ No newline at end of file +from pytket._tket.mapping import * # type: ignore diff --git a/pytket/pytket/placement/__init__.py b/pytket/pytket/placement/__init__.py index 32c27ae5df..4b7ae79613 100644 --- a/pytket/pytket/placement/__init__.py +++ b/pytket/pytket/placement/__init__.py @@ -17,4 +17,4 @@ logical circuit qubit identifiers to physical architecture node identifiers, for the purpose of compilation.""" -from pytket._tket.placement import * # type: ignore \ No newline at end of file +from pytket._tket.placement import * # type: ignore diff --git a/pytket/pytket/zx/tensor_eval.py b/pytket/pytket/zx/tensor_eval.py index a922ed8e2f..a9071aa726 100644 --- a/pytket/pytket/zx/tensor_eval.py +++ b/pytket/pytket/zx/tensor_eval.py @@ -17,10 +17,10 @@ from typing import Dict, List, Any from math import floor, pi, sqrt import numpy as np -from pytket.zx import ZXDiagram, ZXType, ZXVert, BasicGen, QuantumType, Rewrite # type: ignore +from pytket.zx import ZXDiagram, ZXType, ZXVert, PhasedGen, QuantumType, Rewrite # type: ignore -def _spider_to_tensor(gen: BasicGen, rank: int) -> np.ndarray: +def _spider_to_tensor(gen: PhasedGen, rank: int) -> np.ndarray: try: if gen.type == ZXType.Hbox: param_c = complex(gen.param) diff --git a/pytket/setup.py b/pytket/setup.py index 926f664882..028f15511f 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io import os import platform import re @@ -22,11 +21,12 @@ import shutil from multiprocessing import cpu_count from distutils.version import LooseVersion +from concurrent.futures import ThreadPoolExecutor as Pool +from shutil import which import setuptools # type: ignore from setuptools import setup, Extension from setuptools.command.build_ext import build_ext # type: ignore -from concurrent.futures import ThreadPoolExecutor as Pool -from shutil import which +from wheel.bdist_wheel import bdist_wheel as _bdist_wheel class CMakeExtension(Extension): @@ -221,6 +221,18 @@ def build_extension(self, ext): "architecture", ] +setup_dir = os.path.abspath(os.path.dirname(__file__)) +plat_name = os.getenv("WHEEL_PLAT_NAME") + + +class bdist_wheel(_bdist_wheel): + def finalize_options(self): + _bdist_wheel.finalize_options(self) + if plat_name is not None: + print(f"Overriding plat_name to {plat_name}") + self.plat_name = plat_name + self.plat_name_supplied = True + setup( name="pytket", @@ -228,7 +240,8 @@ def build_extension(self, ext): author_email="seyon.sivarajah@cambridgequantum.com", python_requires=">=3.8", url="https://cqcl.github.io/pytket", - description="Python module for interfacing with the CQC tket library of quantum software", + description="Python module for interfacing with the CQC tket library of quantum " + "software", license="Apache 2", packages=setuptools.find_packages(), install_requires=[ @@ -245,9 +258,7 @@ def build_extension(self, ext): ext_modules=[ CMakeExtension("pytket._tket.{}".format(binder)) for binder in binders ], - cmdclass={ - "build_ext": CMakeBuild, - }, + cmdclass={"build_ext": CMakeBuild, "bdist_wheel": bdist_wheel}, classifiers=[ "Environment :: Console", "Programming Language :: Python :: 3.8", @@ -264,4 +275,9 @@ def build_extension(self, ext): include_package_data=True, package_data={"pytket": ["py.typed"]}, zip_safe=False, + use_scm_version={ + "root": os.path.dirname(setup_dir), + "write_to": os.path.join(setup_dir, "pytket", "_version.py"), + "write_to_template": "__version__ = '{version}'", + }, ) diff --git a/pytket/tests/circuit_test.py b/pytket/tests/circuit_test.py index 6954a6356a..404d644cdd 100644 --- a/pytket/tests/circuit_test.py +++ b/pytket/tests/circuit_test.py @@ -33,6 +33,8 @@ CustomGateDef, Qubit, Bit, + BitRegister, + QubitRegister, ) from pytket.circuit.display import render_circuit_as_html @@ -676,13 +678,16 @@ def test_opgroups() -> None: # Remove a redundant gate c = Circuit(3).H(0) + assert len(c.opgroups) == 0 c.CX(0, 1, opgroup="cx0") c.CX(1, 2, opgroup="cx1") c.CX(2, 0, opgroup="cx2") c.CX(0, 1, opgroup="cx3") + assert c.opgroups == {"cx0", "cx1", "cx2", "cx3"} c.substitute_named(Circuit(2), "cx3") assert c.n_gates == 4 assert c.n_gates_of_type(OpType.CX) == 3 + assert c.opgroups == {"cx0", "cx1", "cx2"} def test_phase_polybox() -> None: @@ -740,6 +745,45 @@ def test_clifford_checking() -> None: assert m.is_clifford_type() == False +def test_getting_registers() -> None: + c = Circuit(2, 1) + c_regs = c.c_registers + assert len(c_regs) == 1 + assert c_regs[0] == BitRegister("c", 1) + q_regs = c.q_registers + assert len(q_regs) == 1 + assert q_regs[0] == QubitRegister("q", 2) + q_err_msg = "Cannot find quantum register with name" + c_err_msg = "Cannot find classical register with name" + with pytest.raises(RuntimeError) as e: + c.get_c_register("q") + assert c_err_msg in str(e.value) + with pytest.raises(RuntimeError) as e: + c.get_q_register("c") + assert q_err_msg in str(e.value) + assert c.get_c_register("c").name == 
"c" + assert c.get_c_register("c").size == 1 + assert c.get_q_register("q").name == "q" + assert c.get_q_register("q").size == 2 + c.add_q_register("test_qr", 10) + c.add_c_register("test_cr", 8) + assert c.get_c_register("test_cr").name == "test_cr" + assert c.get_c_register("test_cr").size == 8 + assert c.get_q_register("test_qr").name == "test_qr" + assert c.get_q_register("test_qr").size == 10 + + c_regs = c.c_registers + c_regs.sort() + assert len(c_regs) == 2 + assert c_regs[0] == BitRegister("c", 1) + assert c_regs[1] == BitRegister("test_cr", 8) + q_regs = c.q_registers + q_regs.sort() + assert len(q_regs) == 2 + assert q_regs[0] == QubitRegister("q", 2) + assert q_regs[1] == QubitRegister("test_qr", 10) + + if __name__ == "__main__": test_circuit_gen() test_symbolic_ops() diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py index a739c7b0ed..e3f67d808e 100644 --- a/pytket/tests/mapping_test.py +++ b/pytket/tests/mapping_test.py @@ -23,7 +23,7 @@ # simple deterministic heuristic used for testing purposes def route_subcircuit_func( circuit: Circuit, architecture: Architecture -) -> Tuple[Circuit, Dict[Node, Node], Dict[Node, Node]]: +) -> Tuple[bool, Circuit, Dict[Node, Node], Dict[Node, Node]]: # make a replacement circuit with identical unitds replacement_circuit = Circuit() for qb in circuit.qubits: @@ -60,7 +60,7 @@ def route_subcircuit_func( for com in circuit.get_commands(): rp_qubits = [permutation_map[relabelling_map[q]] for q in com.qubits] if len(com.qubits) > 2: - raise ValueError("Command must have maximum two qubits") + return (False, Circuit(), {}, {}) if len(com.qubits) == 1: replacement_circuit.add_gate(com.op.type, rp_qubits) if len(com.qubits) == 2: @@ -90,15 +90,13 @@ def route_subcircuit_func( replacement_circuit.add_gate(com.op.type, rp_qubits) - return (replacement_circuit, relabelling_map, permutation_map) + return (True, replacement_circuit, relabelling_map, permutation_map) -def check_subcircuit_func_true(circuit: Circuit, architecture: Architecture) -> bool: - return True - - -def check_subcircuit_func_false(circuit: Circuit, architecture: Architecture) -> bool: - return False +def route_subcircuit_func_false( + circuit: Circuit, architecture: Architecture +) -> Tuple[bool, Circuit, Dict[Node, Node], Dict[Node, Node]]: + return (False, Circuit(), {}, {}) def test_LexiRouteRoutingMethod() -> None: @@ -127,7 +125,7 @@ def test_RoutingMethodCircuit_custom() -> None: test_mm = MappingManager(test_a) test_mm.route_circuit( test_c, - [RoutingMethodCircuit(route_subcircuit_func, check_subcircuit_func_true, 5, 5)], + [RoutingMethodCircuit(route_subcircuit_func, 5, 5)], ) routed_commands = test_c.get_commands() @@ -152,15 +150,12 @@ def test_RoutingMethodCircuit_custom_list() -> None: test_mm.route_circuit( test_c, [ - RoutingMethodCircuit( - route_subcircuit_func, check_subcircuit_func_false, 5, 5 - ), + RoutingMethodCircuit(route_subcircuit_func_false, 5, 5), LexiLabellingMethod(), LexiRouteRoutingMethod(), ], ) routed_commands = test_c.get_commands() - assert routed_commands[0].op.type == OpType.CX assert routed_commands[0].qubits == [nodes[1], nodes[0]] assert routed_commands[1].op.type == OpType.CX @@ -174,9 +169,7 @@ def test_RoutingMethodCircuit_custom_list() -> None: test_mm.route_circuit( test_c, [ - RoutingMethodCircuit( - route_subcircuit_func, check_subcircuit_func_true, 5, 5 - ), + RoutingMethodCircuit(route_subcircuit_func, 5, 5), LexiLabellingMethod(), LexiRouteRoutingMethod(), ], diff --git a/pytket/tests/predicates_test.py 
b/pytket/tests/predicates_test.py index d02823d215..1ecdbf9bde 100644 --- a/pytket/tests/predicates_test.py +++ b/pytket/tests/predicates_test.py @@ -32,6 +32,7 @@ RoutingPass, CXMappingPass, PlacementPass, + NaivePlacementPass, RenameQubitsPass, FullMappingPass, DefaultMappingPass, @@ -202,14 +203,29 @@ def test_routing_and_placement_pass() -> None: pl = Placement(arc) routing = RoutingPass(arc) placement = PlacementPass(pl) + nplacement = NaivePlacementPass(arc) cu = CompilationUnit(circ.copy()) assert placement.apply(cu) assert routing.apply(cu) + assert nplacement.apply(cu) expected_map = {q[0]: n1, q[1]: n0, q[2]: n2, q[3]: n5, q[4]: n3} assert cu.initial_map == expected_map + cu1 = CompilationUnit(circ.copy()) + assert nplacement.apply(cu1) + arcnodes = arc.nodes + expected_nmap = { + q[0]: arcnodes[0], + q[1]: arcnodes[1], + q[2]: arcnodes[2], + q[3]: arcnodes[3], + q[4]: arcnodes[4], + } + assert cu1.initial_map == expected_nmap # check composition works ok - seq_pass = SequencePass([SynthesiseTket(), placement, routing, SynthesiseUMD()]) + seq_pass = SequencePass( + [SynthesiseTket(), placement, routing, nplacement, SynthesiseUMD()] + ) cu2 = CompilationUnit(circ.copy()) assert seq_pass.apply(cu2) assert cu2.initial_map == expected_map @@ -225,7 +241,7 @@ def test_routing_and_placement_pass() -> None: def test_default_mapping_pass() -> None: circ = Circuit() - q = circ.add_q_register("q", 5) + q = circ.add_q_register("q", 6) circ.CX(0, 1) circ.H(0) circ.Z(1) @@ -235,14 +251,17 @@ def test_default_mapping_pass() -> None: circ.X(2) circ.CX(1, 4) circ.CX(0, 4) + circ.H(5) n0 = Node("b", 0) n1 = Node("b", 1) n2 = Node("b", 2) n3 = Node("a", 0) n4 = Node("f", 0) - arc = Architecture([[n0, n1], [n1, n2], [n2, n3], [n3, n4]]) + n5 = Node("g", 7) + arc = Architecture([[n0, n1], [n1, n2], [n2, n3], [n3, n4], [n4, n5]]) pl = GraphPlacement(arc) + nplacement = NaivePlacementPass(arc) routing = RoutingPass(arc) placement = PlacementPass(pl) default = DefaultMappingPass(arc) @@ -251,6 +270,7 @@ def test_default_mapping_pass() -> None: assert placement.apply(cu_rp) assert routing.apply(cu_rp) + assert nplacement.apply(cu_rp) assert default.apply(cu_def) assert cu_rp.circuit == cu_def.circuit @@ -652,6 +672,10 @@ def sq(a: float, b: float, c: float) -> Circuit: assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement" assert p_pass.to_dict()["StandardPass"]["placement"]["config"]["depth_limit"] == 5 + # NaivePlacementPass + np_pass = NaivePlacementPass(arc) + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" + assert check_arc_dict(arc, np_pass.to_dict()["StandardPass"]["architecture"]) # RenameQubitsPass qm = {Qubit("a", 0): Qubit("b", 1), Qubit("a", 1): Qubit("b", 0)} rn_pass = RenameQubitsPass(qm) @@ -666,8 +690,10 @@ def sq(a: float, b: float, c: float) -> Circuit: assert fm_pass.to_dict()["pass_class"] == "SequencePass" p_pass = fm_pass.get_sequence()[0] r_pass = fm_pass.get_sequence()[1] - assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" + np_pass = fm_pass.get_sequence()[2] + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" + assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement" # DefaultMappingPass @@ 
-675,20 +701,24 @@ def sq(a: float, b: float, c: float) -> Circuit: assert dm_pass.to_dict()["pass_class"] == "SequencePass" p_pass = dm_pass.get_sequence()[0].get_sequence()[0] r_pass = dm_pass.get_sequence()[0].get_sequence()[1] + np_pass = dm_pass.get_sequence()[0].get_sequence()[2] d_pass = dm_pass.get_sequence()[1] assert d_pass.to_dict()["StandardPass"]["name"] == "DelayMeasures" assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement" # DefaultMappingPass with delay_measures=False dm_pass = DefaultMappingPass(arc, False) assert dm_pass.to_dict()["pass_class"] == "SequencePass" - assert len(dm_pass.get_sequence()) == 2 + assert len(dm_pass.get_sequence()) == 3 p_pass = dm_pass.get_sequence()[0] r_pass = dm_pass.get_sequence()[1] + np_pass = dm_pass.get_sequence()[2] assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement" # AASRouting diff --git a/schemas/compiler_pass_v1.json b/schemas/compiler_pass_v1.json index fa74d46b26..0a473e7129 100644 --- a/schemas/compiler_pass_v1.json +++ b/schemas/compiler_pass_v1.json @@ -403,6 +403,20 @@ ] } }, + { + "if": { + "properties": { + "name": { + "const": "NaivePlacementPass" + } + } + }, + "then": { + "required": [ + "architecture" + ] + } + }, { "if": { "properties": { diff --git a/tket/src/Circuit/include/Circuit/Circuit.hpp b/tket/src/Circuit/include/Circuit/Circuit.hpp index fc6ede8f71..e6c7db4aa0 100644 --- a/tket/src/Circuit/include/Circuit/Circuit.hpp +++ b/tket/src/Circuit/include/Circuit/Circuit.hpp @@ -636,6 +636,11 @@ class Circuit { const std::optional &get_opgroup_from_Vertex( const Vertex &vert) const; + /** + * Get the set of all opgroup names. 
+ */ + const std::unordered_set get_opgroups() const; + // O(1) (lookup in hashtable) OpDesc get_OpDesc_from_Vertex(const Vertex &vert) const; OpType get_OpType_from_Vertex(const Vertex &vert) const; diff --git a/tket/src/Circuit/setters_and_getters.cpp b/tket/src/Circuit/setters_and_getters.cpp index bc075860bb..00ed263b3b 100644 --- a/tket/src/Circuit/setters_and_getters.cpp +++ b/tket/src/Circuit/setters_and_getters.cpp @@ -558,6 +558,17 @@ const std::optional &Circuit::get_opgroup_from_Vertex( return this->dag[vert].opgroup; } +const std::unordered_set Circuit::get_opgroups() const { + std::unordered_set opgroups; + BGL_FORALL_VERTICES(v, dag, DAG) { + std::optional v_opgroup = get_opgroup_from_Vertex(v); + if (v_opgroup) { + opgroups.insert(v_opgroup.value()); + } + } + return opgroups; +} + void Circuit::set_vertex_Op_ptr(const Vertex &vert, const Op_ptr &op) { this->dag[vert].op = op; } diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp index cd52143ece..38d22fce71 100644 --- a/tket/src/Mapping/BoxDecomposition.cpp +++ b/tket/src/Mapping/BoxDecomposition.cpp @@ -1,3 +1,16 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "Mapping/BoxDecomposition.hpp" #include "Mapping/MappingFrontier.hpp" @@ -9,10 +22,10 @@ BoxDecomposition::BoxDecomposition( std::shared_ptr &_mapping_frontier) : architecture_(_architecture), mapping_frontier_(_mapping_frontier) {} -void BoxDecomposition::solve() { +bool BoxDecomposition::solve() { // Box type vertices are later removed from DAG VertexList bin; - + bool modified = false; std::shared_ptr frontier_edges = frontier_convert_vertport_to_edge( this->mapping_frontier_->circuit_, @@ -20,41 +33,34 @@ void BoxDecomposition::solve() { CutFrontier next_cut = this->mapping_frontier_->circuit_.next_q_cut(frontier_edges); for (Vertex &vert : *next_cut.slice) { - if (this->mapping_frontier_->circuit_.substitute_box_vertex( - vert, Circuit::VertexDeletion::No)) - bin.push_back(vert); + Op_ptr op = this->mapping_frontier_->circuit_.get_Op_ptr_from_Vertex(vert); + if (op->get_desc().is_box() || + (op->get_type() == OpType::Conditional && + static_cast(*op).get_op()->get_desc().is_box())) { + if (this->mapping_frontier_->circuit_.substitute_box_vertex( + vert, Circuit::VertexDeletion::No)) { + modified = true; + bin.push_back(vert); + } + } + } + if (!modified) { + return false; } - // Delete vertices this->mapping_frontier_->circuit_.remove_vertices( bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + return true; } BoxDecompositionRoutingMethod::BoxDecompositionRoutingMethod(){}; -bool BoxDecompositionRoutingMethod::check_method( - const std::shared_ptr &mapping_frontier, - const ArchitecturePtr & /*architecture*/) const { - std::shared_ptr frontier_edges = - frontier_convert_vertport_to_edge( - mapping_frontier->circuit_, mapping_frontier->quantum_boundary); - CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); - for 
(const Vertex &vert : *next_cut.slice) { - Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); - if (op->get_desc().is_box() || - (op->get_type() == OpType::Conditional && - static_cast(*op).get_op()->get_desc().is_box())) - return true; - } - return false; -} - -unit_map_t BoxDecompositionRoutingMethod::routing_method( +std::pair BoxDecompositionRoutingMethod::routing_method( std::shared_ptr &mapping_frontier, const ArchitecturePtr &architecture) const { BoxDecomposition bd(architecture, mapping_frontier); - bd.solve(); - return {}; + bool modified = bd.solve(); + return {modified, {}}; } nlohmann::json BoxDecompositionRoutingMethod::serialize() const { diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt index dd39ce01f8..a300ee831f 100644 --- a/tket/src/Mapping/CMakeLists.txt +++ b/tket/src/Mapping/CMakeLists.txt @@ -21,6 +21,7 @@ endif() add_library(tket-${COMP} LexicographicalComparison.cpp LexiRoute.cpp + LexiRouteRoutingMethod.cpp LexiLabelling.cpp MappingFrontier.cpp MappingManager.cpp diff --git a/tket/src/Mapping/LexiLabelling.cpp b/tket/src/Mapping/LexiLabelling.cpp index 1524c98aac..7455614bb2 100644 --- a/tket/src/Mapping/LexiLabelling.cpp +++ b/tket/src/Mapping/LexiLabelling.cpp @@ -15,41 +15,11 @@ namespace tket { -bool LexiLabellingMethod::check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const { - std::shared_ptr frontier_edges = - frontier_convert_vertport_to_edge( - mapping_frontier->circuit_, mapping_frontier->quantum_boundary); - CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); - - for (const Vertex& vert : *next_cut.slice) { - EdgeVec ev = mapping_frontier->circuit_.get_in_edges_of_type( - vert, EdgeType::Quantum); - // lexilabelling can't support dynamic labelling of >2 qubit gates - if (ev.size() > 2) { - return false; - } - for (const Edge& e : ev) { - for (const std::pair& pair : - frontier_edges->get()) { - if (pair.second == e) { - if (!architecture->node_exists(Node(pair.first))) { - return true; - } - } - } - } - } - return false; -} - -unit_map_t LexiLabellingMethod::routing_method( +std::pair LexiLabellingMethod::routing_method( std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const { LexiRoute lr(architecture, mapping_frontier); - lr.solve_labelling(); - return {}; + return {lr.solve_labelling(), {}}; } nlohmann::json LexiLabellingMethod::serialize() const { diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index ef49ae41e1..adfcf1f8af 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -23,7 +23,6 @@ LexiRoute::LexiRoute( const ArchitecturePtr& _architecture, std::shared_ptr& _mapping_frontier) : architecture_(_architecture), mapping_frontier_(_mapping_frontier) { - this->set_interacting_uids(); // set initial logical->physical labelling for (const Qubit& qb : this->mapping_frontier_->circuit_.all_qubits()) { this->labelling_.insert({qb, qb}); @@ -178,9 +177,11 @@ bool LexiRoute::update_labelling() { * Updates this->interacting_uids_ with all "interacting" pairs * of UnitID in this->mapping_frontier_ */ -void LexiRoute::set_interacting_uids(bool assigned_only) { +bool LexiRoute::set_interacting_uids( + bool assigned_only, bool route_check, bool label_check) { // return types this->interacting_uids_.clear(); + bool all_placed = true; for (auto it = this->mapping_frontier_->quantum_boundary->get().begin(); it != 
this->mapping_frontier_->quantum_boundary->get().end(); @@ -189,8 +190,8 @@ void LexiRoute::set_interacting_uids(bool assigned_only) { it->second.first, it->second.second); Vertex v0 = this->mapping_frontier_->circuit_.target(e0); // should never be input vertex, so can always use in_edges - if (this->mapping_frontier_->circuit_.get_OpType_from_Vertex(v0) != - OpType::Barrier) { + Op_ptr op = this->mapping_frontier_->circuit_.get_Op_ptr_from_Vertex(v0); + if (op->get_type() != OpType::Barrier) { int n_edges = this->mapping_frontier_->circuit_.n_in_edges_of_type( v0, EdgeType::Quantum); // make forwards = backwards @@ -207,9 +208,16 @@ void LexiRoute::set_interacting_uids(bool assigned_only) { // we can assume a qubit will only be in one interaction // we can assume from how we iterate through pairs that each qubit // will only be found in one match - if (!assigned_only || - (this->architecture_->node_exists(Node(it->first)) && - this->architecture_->node_exists(Node(jt->first)))) { + bool node0_exists = + this->architecture_->node_exists(Node(it->first)); + bool node1_exists = + this->architecture_->node_exists(Node(jt->first)); + if (!node0_exists || !node1_exists || op->get_desc().is_box()) { + all_placed = false; + if (route_check) return false; + } + + if (!assigned_only || (node0_exists && node1_exists)) { interacting_uids_.insert({it->first, jt->first}); interacting_uids_.insert({jt->first, it->first}); } @@ -220,11 +228,33 @@ void LexiRoute::set_interacting_uids(bool assigned_only) { n_edges > 2 && this->mapping_frontier_->circuit_.get_OpType_from_Vertex(v0) != OpType::Barrier) { + if (label_check) return true; + if (route_check) return false; throw LexiRouteError( "LexiRoute only supports non-Barrier vertices with 1 or 2 edges."); } } } + + // conditions for proceeding with labelling + if (label_check) { + if (all_placed) { + return true; + } else { + return false; + } + } + // this should have left early when first found + if (route_check) { + if (all_placed) { + return true; + } else { + return false; + } + } + // => either route_check true and all_placed so valid + // or !route_check and !label_check so return true and discard + return true; } swap_set_t LexiRoute::get_candidate_swaps() { @@ -400,13 +430,24 @@ void LexiRoute::remove_swaps_decreasing(swap_set_t& swaps) { } } -void LexiRoute::solve_labelling() { - this->update_labelling(); - this->mapping_frontier_->update_quantum_boundary_uids(this->labelling_); - return; +bool LexiRoute::solve_labelling() { + bool all_labelled = this->set_interacting_uids(false, false, true); + if (!all_labelled) { + this->update_labelling(); + this->mapping_frontier_->update_quantum_boundary_uids(this->labelling_); + return true; + } + return false; } -void LexiRoute::solve(unsigned lookahead) { +bool LexiRoute::solve(unsigned lookahead) { + // work out if valid + + bool all_labelled = this->set_interacting_uids(false, true, false); + if (!all_labelled) { + return false; + } + // store a copy of the original this->mapping_frontier_->quantum_boundray // this object will be updated and reset throughout the swap picking procedure // so need to return it to original setting at end @@ -501,73 +542,7 @@ void LexiRoute::solve(unsigned lookahead) { add_ordered_bridge(chosen_swap.second); } } - return; -} - -LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) - : max_depth_(_max_depth){}; - -bool LexiRouteRoutingMethod::check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const { - 
std::shared_ptr frontier_edges = - frontier_convert_vertport_to_edge( - mapping_frontier->circuit_, mapping_frontier->quantum_boundary); - CutFrontier next_cut = mapping_frontier->circuit_.next_q_cut(frontier_edges); - for (const Vertex& vert : *next_cut.slice) { - Op_ptr op = mapping_frontier->circuit_.get_Op_ptr_from_Vertex(vert); - // can't work wih box ops, or gates with more than 2 qubits that aren't a - // BRIDGE - - if ((mapping_frontier->circuit_.n_in_edges_of_type( - vert, EdgeType::Quantum) > 2 && - op->get_type() != OpType::BRIDGE) || - (op->get_desc().is_box() || (op->get_type() == OpType::Conditional && - static_cast(*op) - .get_op() - ->get_desc() - .is_box()))) { - return false; - } else { - // second check that all input UnitID are actually in architecture - for (const Edge& e : mapping_frontier->circuit_.get_in_edges_of_type( - vert, EdgeType::Quantum)) { - for (const std::pair& pair : - frontier_edges->get()) { - if (pair.second == e) { - if (!architecture->node_exists(Node(pair.first))) { - return false; - } - } - } - } - } - } return true; } -unit_map_t LexiRouteRoutingMethod::routing_method( - std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const { - LexiRoute lr(architecture, mapping_frontier); - lr.solve(this->max_depth_); - return {}; -} - -unsigned LexiRouteRoutingMethod::get_max_depth() const { - return this->max_depth_; -} - -nlohmann::json LexiRouteRoutingMethod::serialize() const { - nlohmann::json j; - j["depth"] = this->get_max_depth(); - j["name"] = "LexiRouteRoutingMethod"; - return j; -} - -LexiRouteRoutingMethod LexiRouteRoutingMethod::deserialize( - const nlohmann::json& j) { - return LexiRouteRoutingMethod(j.at("depth").get()); -} - } // namespace tket diff --git a/tket/src/Mapping/LexiRouteRoutingMethod.cpp b/tket/src/Mapping/LexiRouteRoutingMethod.cpp new file mode 100644 index 0000000000..9ec5b05f1d --- /dev/null +++ b/tket/src/Mapping/LexiRouteRoutingMethod.cpp @@ -0,0 +1,45 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
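With check_method removed, MappingManager now simply tries each RoutingMethod in the order supplied and uses the bool returned by routing_method to decide whether that method made progress. A minimal sketch of driving this from Python, mirroring mapping_test.py in this patch and assuming the classes are re-exported from pytket.mapping as there; the node names are illustrative only.

```python
from pytket import Circuit
from pytket.circuit import Node
from pytket.architecture import Architecture
from pytket.mapping import MappingManager, LexiLabellingMethod, LexiRouteRoutingMethod

nodes = [Node("grid", i) for i in range(4)]
arc = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]], [nodes[2], nodes[3]]])

circ = Circuit(4).CX(0, 1).CX(1, 2).CX(0, 3)
mm = MappingManager(arc)
# Methods are tried in the order given; a method whose routing_method returns
# False simply defers to the next one in the list.
mm.route_circuit(circ, [LexiLabellingMethod(), LexiRouteRoutingMethod()])
print(circ.get_commands())  # CXs now act on adjacent nodes, with SWAPs inserted as needed
```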
+ +#include "Mapping/LexiRouteRoutingMethod.hpp" + +namespace tket { + +LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) + : max_depth_(_max_depth){}; + +std::pair LexiRouteRoutingMethod::routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + LexiRoute lr(architecture, mapping_frontier); + return {lr.solve(this->max_depth_), {}}; +} + +unsigned LexiRouteRoutingMethod::get_max_depth() const { + return this->max_depth_; +} + +nlohmann::json LexiRouteRoutingMethod::serialize() const { + nlohmann::json j; + j["depth"] = this->get_max_depth(); + j["name"] = "LexiRouteRoutingMethod"; + return j; +} + +LexiRouteRoutingMethod LexiRouteRoutingMethod::deserialize( + const nlohmann::json& j) { + return LexiRouteRoutingMethod(j.at("depth").get()); +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index c7864d64c5..7ae89751af 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -71,20 +71,19 @@ bool MappingManager::route_circuit_with_maps( bool circuit_modified = !check_finish(); while (!check_finish()) { // The order methods are passed in std::vector is - // the order they are iterated through to call "check_method" + // the order they are run // If a method performs better but only on specific subcircuits, // rank it earlier in the passed vector bool valid_methods = false; for (const auto& rm : routing_methods) { // true => can use held routing method - if (rm->check_method(mapping_frontier, this->architecture_)) { + std::pair bool_map = + rm->routing_method(mapping_frontier, this->architecture_); + if (bool_map.first) { valid_methods = true; - unit_map_t partial_permutation = - rm->routing_method(mapping_frontier, this->architecture_); - - if (partial_permutation.size() > 0) { + if (bool_map.second.size() > 0) { std::map node_map; - for (const auto& x : partial_permutation) { + for (const auto& x : bool_map.second) { node_map.insert({Node(x.first), Node(x.second)}); } for (const std::pair& swap : diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 3e0cea53ff..27f521d962 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -185,7 +185,7 @@ static void partial_rewire( } } -void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { +bool MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { // Assume the frontier has been advanced // store a copy of the original this->mapping_frontier_->quantum_boundray @@ -199,6 +199,9 @@ void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { // Get a subcircuit only for iterating vertices Subcircuit circ = this->mapping_frontier_->get_frontier_subcircuit(max_depth, max_size); + + // for return value + bool modification_made = false; // since we assume that the frontier has been advanced // we are certain that any multi-q vert lies after the frontier for (const Vertex &vert : circ.verts) { @@ -213,6 +216,7 @@ void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { this->mapping_frontier_->circuit_, this->u_frontier_edges_, vert); if (commute_pairs != std::nullopt) { + modification_made = true; partial_rewire( vert, this->mapping_frontier_->circuit_, (*commute_pairs).first, (*commute_pairs).second); @@ -227,44 +231,18 @@ void MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { } // Return the quantum boundary to its 
original setting this->mapping_frontier_->set_quantum_boundary(copy); + return modification_made; } MultiGateReorderRoutingMethod::MultiGateReorderRoutingMethod( unsigned _max_depth, unsigned _max_size) : max_depth_(_max_depth), max_size_(_max_size) {} -bool MultiGateReorderRoutingMethod::check_method( - const std::shared_ptr &mapping_frontier, - const ArchitecturePtr &architecture) const { - const EdgeVec u_frontier_edges = - convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( - mapping_frontier->circuit_, mapping_frontier->quantum_boundary)); - - Subcircuit circ = mapping_frontier->get_frontier_subcircuit( - this->max_depth_, this->max_size_); - // since we assume that the frontier has been advanced - // we are certain that any multi-q vert lies after the frontier - for (const Vertex &vert : circ.verts) { - if (is_multiq_quantum_gate(mapping_frontier->circuit_, vert) && - is_physically_permitted(mapping_frontier, architecture, vert)) { - std::optional> commute_pairs = - try_find_commute_edges( - mapping_frontier->circuit_, u_frontier_edges, vert); - - if (commute_pairs != std::nullopt) { - return true; - } - } - } - return false; -} - -unit_map_t MultiGateReorderRoutingMethod::routing_method( +std::pair MultiGateReorderRoutingMethod::routing_method( std::shared_ptr &mapping_frontier, const ArchitecturePtr &architecture) const { MultiGateReorder mr(architecture, mapping_frontier); - mr.solve(this->max_depth_, this->max_size_); - return {}; + return {mr.solve(this->max_depth_, this->max_size_), {}}; } unsigned MultiGateReorderRoutingMethod::get_max_depth() const { diff --git a/tket/src/Mapping/RoutingMethodCircuit.cpp b/tket/src/Mapping/RoutingMethodCircuit.cpp index 474253a9d7..6657dfacad 100644 --- a/tket/src/Mapping/RoutingMethodCircuit.cpp +++ b/tket/src/Mapping/RoutingMethodCircuit.cpp @@ -17,32 +17,15 @@ namespace tket { RoutingMethodCircuit::RoutingMethodCircuit( - const std::function( + const std::function( const Circuit&, const ArchitecturePtr&)> _route_subcircuit, - const std::function - _check_subcircuit, unsigned _max_size, unsigned _max_depth) : route_subcircuit_(_route_subcircuit), - check_subcircuit_(_check_subcircuit), max_size_(_max_size), max_depth_(_max_depth){}; -bool RoutingMethodCircuit::check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const { - // Get circuit, pass to held check method - Subcircuit frontier_subcircuit = mapping_frontier->get_frontier_subcircuit( - this->max_depth_, this->max_size_); - Circuit frontier_circuit = - mapping_frontier->circuit_.subcircuit(frontier_subcircuit); - frontier_circuit.rename_units( - mapping_frontier->get_default_to_quantum_boundary_unit_map()); - - return this->check_subcircuit_(frontier_circuit, architecture); -} - -unit_map_t RoutingMethodCircuit::routing_method( +std::pair RoutingMethodCircuit::routing_method( std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const { // Produce subcircuit and circuit @@ -54,15 +37,19 @@ unit_map_t RoutingMethodCircuit::routing_method( mapping_frontier->get_default_to_quantum_boundary_unit_map()); // get routed subcircuit - std::tuple routed_subcircuit = + std::tuple routed_subcircuit = this->route_subcircuit_(frontier_circuit, architecture); - unit_map_t new_labelling = std::get<1>(routed_subcircuit); + + if (!std::get<0>(routed_subcircuit)) { + return {false, {}}; + } // update unit id at boundary in case of relabelling - mapping_frontier->update_quantum_boundary_uids(new_labelling); + 
mapping_frontier->update_quantum_boundary_uids( + std::get<2>(routed_subcircuit)); unit_map_t swap_permutation; - for (const auto& pair : new_labelling) { + for (const auto& pair : std::get<2>(routed_subcircuit)) { if (pair.first != pair.second && architecture->node_exists(Node(pair.first))) { swap_permutation.insert(pair); @@ -70,14 +57,14 @@ unit_map_t RoutingMethodCircuit::routing_method( } // permute edges held by unitid at out boundary due to swaps mapping_frontier->permute_subcircuit_q_out_hole( - std::get<2>(routed_subcircuit), frontier_subcircuit); + std::get<3>(routed_subcircuit), frontier_subcircuit); // substitute old boundary with new cirucit - std::get<0>(routed_subcircuit).flatten_registers(); + std::get<1>(routed_subcircuit).flatten_registers(); mapping_frontier->circuit_.substitute( - std::get<0>(routed_subcircuit), frontier_subcircuit); + std::get<1>(routed_subcircuit), frontier_subcircuit); // return initial unit_map_t incase swap network required - return swap_permutation; + return {true, swap_permutation}; } } // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp index 8eab4b6c93..2461c5c4d1 100644 --- a/tket/src/Mapping/RoutingMethodJson.cpp +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -13,6 +13,7 @@ // limitations under the License. #include "Mapping/RoutingMethodJson.hpp" + #include "Mapping/LexiLabelling.hpp" namespace tket { diff --git a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp index 8b1cd45fa4..dc4a165797 100644 --- a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp +++ b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp @@ -1,5 +1,18 @@ -#ifndef _TKET_BoxDecomposition_H_ -#define _TKET_BoxDecomposition_H_ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once #include "Mapping/MappingFrontier.hpp" #include "Mapping/RoutingMethod.hpp" @@ -19,8 +32,10 @@ class BoxDecomposition { /** * Decompose any boxes in the next slice after the frontier + * + * @return True if Box is decomposed */ - void solve(); + bool solve(); private: // Architecture all new physical operations must respect @@ -35,13 +50,6 @@ class BoxDecompositionRoutingMethod : public RoutingMethod { */ BoxDecompositionRoutingMethod(); - /** - * @return true if method can route subcircuit, false if not - */ - bool check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& /*architecture*/) const override; - /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for * modifying @@ -49,7 +57,7 @@ class BoxDecompositionRoutingMethod : public RoutingMethod { * @return Logical to Physical mapping at boundary due to modification. 
* */ - unit_map_t routing_method( + std::pair routing_method( std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const override; @@ -59,5 +67,3 @@ class BoxDecompositionRoutingMethod : public RoutingMethod { }; } // namespace tket - -#endif \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/LexiLabelling.hpp b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp index 59fd7f756a..17a50ce31b 100644 --- a/tket/src/Mapping/include/Mapping/LexiLabelling.hpp +++ b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp @@ -27,21 +27,15 @@ class LexiLabellingMethod : public RoutingMethod { */ LexiLabellingMethod(){}; - /** - * @return true if method can label unlabelled qubits - */ - bool check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const override; - /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for * modifying * @param architecture Architecture providing physical constraints - * @return Logical to Physical mapping at boundary due to modification. + * @return True if transformation made, Logical to Physical mapping at + * boundary due to modification. * */ - unit_map_t routing_method( + std::pair routing_method( std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const override; diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp index 609d7cd668..1871f1293c 100644 --- a/tket/src/Mapping/include/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -55,16 +55,20 @@ class LexiRoute { * * @param lookahead Number of slices to lookahead at when determining best * SWAP or BRIDGE + * + * @return True if solve has modified circuit for mapping purposes */ - void solve(unsigned lookahead); + bool solve(unsigned lookahead); /** * When called an "unlabelled" Qubit in the Circuit may be relabelled to a * Node in the Architecture, or an "unlabelled" Qubit may have its path merged * with an ancilla qubit. The decision making is based on the heuristic * outlined in arXiv:1902.08091. + * + * @return True if solve_labelling has modified circuit for mapping purposes */ - void solve_labelling(); + bool solve_labelling(); private: /** @@ -76,7 +80,9 @@ class LexiRoute { * @param assigned_only If true, only include interactions where both UnitID * are in this->architecture_. */ - void set_interacting_uids(bool assigned_only = false); + bool set_interacting_uids( + bool assigned_only = false, bool route_check = false, + bool label_check = false); /** * If there is some "free" Node in Architecture at distance "distances" on @@ -162,49 +168,4 @@ class LexiRoute { std::set assigned_nodes_; }; -// Child class of RoutingMethod, with overloaded methods for routing -// MappingFrontier objects -class LexiRouteRoutingMethod : public RoutingMethod { - public: - /** - * Checking and Routing methods redefined using LexiRoute. Only circuit depth, - * corresponding to lookahead, is a required parameter. - * - * @param _max_depth Number of layers of gates checked inr outed subcircuit. 
- */ - LexiRouteRoutingMethod(unsigned _max_depth = 100); - - /** - * @return true if method can route subcircuit, false if not - */ - bool check_method( - const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const override; - - /** - * @param mapping_frontier Contains boundary of routed/unrouted circuit for - * modifying - * @param architecture Architecture providing physical constraints - * @return Map between relabelled Qubit, always empty. - * - */ - unit_map_t routing_method( - std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const override; - - /** - * @return Max depth used in lookahead - */ - unsigned get_max_depth() const; - - nlohmann::json serialize() const override; - - static LexiRouteRoutingMethod deserialize(const nlohmann::json& j); - - private: - unsigned max_depth_; -}; - -JSON_DECL(LexiRouteRoutingMethod); - } // namespace tket diff --git a/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp b/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp new file mode 100644 index 0000000000..c490d4dadf --- /dev/null +++ b/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp @@ -0,0 +1,60 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class LexiRouteRoutingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods redefined using LexiRoute. Only circuit depth, + * corresponding to lookahead, is a required parameter. + * + * @param _max_depth Number of layers of gates checked inr outed subcircuit. + */ + LexiRouteRoutingMethod(unsigned _max_depth = 100); + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * + * @return True if modification made, map between relabelled Qubit, always + * empty. + * + */ + std::pair routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + /** + * @return Max depth used in lookahead + */ + unsigned get_max_depth() const; + + nlohmann::json serialize() const override; + + static LexiRouteRoutingMethod deserialize(const nlohmann::json& j); + + private: + unsigned max_depth_; +}; + +JSON_DECL(LexiRouteRoutingMethod); + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp index 317cf9b6d7..63434fb383 100644 --- a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -34,8 +34,10 @@ class MultiGateReorder { * Try to commute any multi-qubit gates to the quantum frontier * @param max_depth Maximum number of layers of gates checked for commutation. * @param max_size Maximum number of gates checked for commutation. 
+ * + * @return true if modification made */ - void solve(unsigned max_depth, unsigned max_size); + bool solve(unsigned max_depth, unsigned max_size); private: // Architecture all new physical operations must respect @@ -55,21 +57,15 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { MultiGateReorderRoutingMethod( unsigned _max_depth = 10, unsigned _max_size = 10); - /** - * @return true if method can route subcircuit, false if not - */ - bool check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const override; - /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for * modifying * @param architecture Architecture providing physical constraints - * @return Logical to Physical mapping at boundary due to modification. + * @return Whether circuit is modified and Logical to Physical mapping at + * boundary due to modification (always empty) * */ - unit_map_t routing_method( + std::pair routing_method( std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const override; diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index 23041e2105..de4e440ebf 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -23,42 +23,29 @@ class RoutingMethod { public: RoutingMethod(){}; virtual ~RoutingMethod() {} - /** - * check_method returns true if held method can route given circuit. - * This is completed by converting boundary subcircuit to a Circuit object - * which is then passed to check_subcircuit_ as defined in constructor. - * - * Overloded parameter mapping_frontier contains boundary of gates to be - * checked for method. - * Overloaded parameter architecture is the architecture method works with - * if permitted. - * @return true if method can route subcircuit, false if not - */ - virtual bool check_method( - const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const { - return false; - } /** * routing_method modifies circuit held in mapping_frontier with gates for the * purpose of moving circuit closer to one physically permitted by given - * architecture. Returns new initial mapping of qubits incase permutation via - * swap network is then required, or new ancilla qubits are added. - * This is completed by converting boundaty subcircuit in mapping frontier to - * a Circuit object which is then passed to route_subcircuit_ as defined in - * the constructor. + * architecture. Returns a pair with a bool returning whether any modification + * was made and a new initial mapping of qubits in case permutation via swap + * network is then required, or new ancilla qubits are added. This is + * completed by converting boundary subcircuit in mapping frontier to a + * Circuit object which is then passed to route_subcircuit_ as defined in the + * constructor. * * Overloaded parameter mapping_frontier contains boundary of routed/unrouted * circuit for modifying. * Overloaded parameter architecture provides physical constraints - * @return Logical to Physical mapping at boundary due to modification. + * + * @return Whether circuit is modified and Logical to Physical mapping at + * boundary due to modification. 
* */ - virtual unit_map_t routing_method( + virtual std::pair routing_method( std::shared_ptr& /*mapping_frontier*/, const ArchitecturePtr& /*architecture*/) const { - return {}; + return {false, {}}; } virtual nlohmann::json serialize() const { diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp index 63a5ca7b15..3189bb6504 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp @@ -26,27 +26,15 @@ class RoutingMethodCircuit : public RoutingMethod { * in the incremental routing of full circuits. * * @param _route_subcircuit Function ptr for partial routing method - * @param _check_subcircuit Function ptr for confirming if method sufficient * @param _max_size Max number of gates in partial routing circuit * @param _max_depth Max depth of partial routing circuit */ RoutingMethodCircuit( - const std::function( + const std::function( const Circuit&, const ArchitecturePtr&)> _route_subcircuit, - const std::function - _check_subcircuit, unsigned _max_size, unsigned _max_depth); - /** - * @param mapping_frontier Contains boundary of gates to be checked for method - * @param architecture Architecture method would work with if permitted - * @return true if method can route subcircuit, false if not - */ - bool check_method( - const std::shared_ptr& mapping_frontier, - const ArchitecturePtr& architecture) const; - /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for * modifying @@ -54,16 +42,14 @@ class RoutingMethodCircuit : public RoutingMethod { * @return Logical to Physical mapping at boundary due to modification. * */ - unit_map_t routing_method( + std::pair routing_method( std::shared_ptr& mapping_frontier, const ArchitecturePtr& architecture) const; private: - const std::function( + const std::function( const Circuit&, const ArchitecturePtr&)> route_subcircuit_; - const std::function - check_subcircuit_; unsigned max_size_, max_depth_; }; diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp index 0a12ed92ff..475e98ffcb 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -15,7 +15,8 @@ #pragma once #include "Mapping/BoxDecomposition.hpp" -#include "Mapping/LexiRoute.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRouteRoutingMethod.hpp" #include "Mapping/MultiGateReorder.hpp" #include "Mapping/RoutingMethod.hpp" #include "Utils/Json.hpp" diff --git a/tket/src/PauliGraph/PauliGraph.cpp b/tket/src/PauliGraph/PauliGraph.cpp index e0b1b83c85..86305fd2ed 100644 --- a/tket/src/PauliGraph/PauliGraph.cpp +++ b/tket/src/PauliGraph/PauliGraph.cpp @@ -313,7 +313,8 @@ void PauliGraph::apply_pauli_gadget_at_end( } PauliGraph::TopSortIterator::TopSortIterator() - : current_vert_(boost::graph_traits::null_vertex()) {} + : pg_(nullptr), + current_vert_(boost::graph_traits::null_vertex()) {} PauliGraph::TopSortIterator::TopSortIterator(const PauliGraph &pg) { if (pg.start_line_.empty()) { diff --git a/tket/src/Placement/CMakeLists.txt b/tket/src/Placement/CMakeLists.txt index fd67a50983..29bd59dd24 100644 --- a/tket/src/Placement/CMakeLists.txt +++ b/tket/src/Placement/CMakeLists.txt @@ -22,7 +22,8 @@ add_library(tket-${COMP} Qubit_Placement.cpp subgraph_mapping.cpp Placement.cpp - PlacementGraphClasses.cpp) + PlacementGraphClasses.cpp + NeighbourPlacements.cpp) 
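The template arguments of the _route_subcircuit functor are stripped in this listing, so the shape of the new callback is easiest to read from how routing_method unpacks its result (std::get<0> to std::get<3>). The sketch below is a reconstruction on that basis only, not a quotation from the patch; the callback name and the literal size/depth limits are placeholders, and the usual tket headers (Mapping/RoutingMethodCircuit.hpp) and namespace are assumed.

  std::tuple<bool, Circuit, unit_map_t, unit_map_t> my_route_subcircuit(
      const Circuit& subcircuit, const ArchitecturePtr& architecture) {
    Circuit routed = subcircuit;  // route a copy of the cut-out frontier here
    bool modified = false;        // set to true if any gate is added or moved
    unit_map_t relabelling;       // qubit relabelling applied at the boundary
    unit_map_t permutation;       // permutation of out-edges caused by swaps
    return {modified, routed, relabelling, permutation};
  }

  // The callback is then held by a RoutingMethodCircuit, together with the
  // size and depth limits used when cutting the frontier subcircuit out:
  RoutingMethodCircuit rmc(
      my_route_subcircuit, /*_max_size=*/100, /*_max_depth=*/100);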
list(APPEND DEPS_${COMP} Architecture @@ -32,6 +33,7 @@ list(APPEND DEPS_${COMP} Graphs Ops OpType + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/Placement/NeighbourPlacements.cpp b/tket/src/Placement/NeighbourPlacements.cpp new file mode 100644 index 0000000000..517d6db5bf --- /dev/null +++ b/tket/src/Placement/NeighbourPlacements.cpp @@ -0,0 +1,145 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "NeighbourPlacements.hpp" + +#include +#include + +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "Utils/TketLog.hpp" + +namespace tket { + +NeighbourPlacements::NeighbourPlacements( + const Architecture& arc, const qubit_mapping_t& init_map) + : arc_(arc), init_map_(init_map), u_to_node_(), rng_() { + auto nodes = arc_.get_all_nodes_vec(); + for (unsigned i = 0; i < nodes.size(); ++i) { + u_to_node_.left.insert({i, nodes[i]}); + } +} + +NeighbourPlacements::ResultVec NeighbourPlacements::get( + unsigned dist, unsigned n, bool optimise, unsigned seed, + unsigned max_tries) { + rng_.set_seed(seed); + + // define a comparison function for placements + std::vector keys; + for (auto [k, v] : init_map_) { + keys.push_back(k); + } + auto map_compare = [&keys]( + const qubit_mapping_t& a, const qubit_mapping_t& b) { + for (auto k : keys) { + if (a.at(k) < b.at(k)) { + return true; + } else if (a.at(k) > b.at(k)) { + return false; + } + } + return false; + }; + // set of all generated placement maps + std::set placements(map_compare); + + ResultVec resvec; + for (unsigned i = 0; i < n; ++i) { + unsigned n_unsuccessful = 0; + while (n_unsuccessful < max_tries) { + Result res = gen_result(dist, optimise, max_tries); + if (!placements.contains(res.map)) { + resvec.push_back(res); + placements.insert(res.map); + break; + } + ++n_unsuccessful; + } + if (n_unsuccessful == max_tries) { + tket_log()->warn( + "Could not generate " + std::to_string(n) + " distinct placements"); + } + } + return resvec; +} + +NeighbourPlacements::Result NeighbourPlacements::gen_result( + unsigned dist, bool optimise, unsigned max_tries) { + SwapList swaps; + tsa_internal::SwapListOptimiser optimiser; + + // it might be impossible to find `dist` non-trivial swaps + unsigned n_unsuccessful = 0; + + while (swaps.size() < dist && n_unsuccessful < max_tries) { + Swap new_swap = gen_swap(); + + if (optimise) { + SwapList swaps_candidate = swaps; + swaps_candidate.push_back(new_swap); + optimiser.full_optimise(swaps_candidate); + if (swaps_candidate.size() > swaps.size()) { + swaps = std::move(swaps_candidate); + n_unsuccessful = 0; + } else { + ++n_unsuccessful; + } + } else { + swaps.push_back(new_swap); + } + } + + if (n_unsuccessful == max_tries) { + tket_log()->warn( + "Unable to generate " + std::to_string(dist) + + " swaps for given architecture"); + } + + return convert_to_res(swaps.to_vector()); +} + +Swap NeighbourPlacements::gen_swap() { + auto edges = arc_.get_all_edges_vec(); + unsigned m = edges.size(); + auto [n1, 
n2] = edges[rng_.get_size_t(m - 1)]; + Swap new_swap{u_to_node_.right.at(n1), u_to_node_.right.at(n2)}; + return new_swap; +} + +NeighbourPlacements::Result NeighbourPlacements::convert_to_res( + const SwapVec& swaps) { + NodeSwapVec node_swaps; + for (auto [u1, u2] : swaps) { + node_swaps.push_back({u_to_node_.left.at(u1), u_to_node_.left.at(u2)}); + } + + qubit_bimap_t qubit_to_node; + qubit_to_node.left.insert(init_map_.begin(), init_map_.end()); + for (auto [n1, n2] : node_swaps) { + const Qubit q1 = qubit_to_node.right.at(n1); + const Qubit q2 = qubit_to_node.right.at(n2); + qubit_to_node.left.erase(q1); + qubit_to_node.left.erase(q2); + qubit_to_node.left.insert({q1, n2}); + qubit_to_node.left.insert({q2, n1}); + } + qubit_mapping_t map; + for (auto [k, v] : qubit_to_node.left) { + map.insert({k, v}); + } + return {map, node_swaps}; +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Placement/Placement.cpp b/tket/src/Placement/Placement.cpp index adb248ed43..e5d2184593 100644 --- a/tket/src/Placement/Placement.cpp +++ b/tket/src/Placement/Placement.cpp @@ -154,6 +154,45 @@ std::vector Placement::get_all_placement_maps( return {get_placement_map(circ_)}; } +qubit_mapping_t NaivePlacement::get_placement_map(const Circuit &circ_) const { + return get_all_placement_maps(circ_).at(0); +} + +std::vector NaivePlacement::get_all_placement_maps( + const Circuit &circ_) const { + qubit_mapping_t placement; + qubit_vector_t to_place; + std::vector placed; + + // Find which/if any qubits need placing + for (const Qubit &q : circ_.all_qubits()) { + Node n(q); + if (!this->arc_.node_exists(n)) { + to_place.push_back(n); + } else { + placed.push_back(n); + // if already placed, make sure qubit retains placement + placement.insert({n, n}); + } + } + // avoid doing std::set_difference unless qubits need to be placed + unsigned n_placed = to_place.size(); + if (n_placed > 0) { + std::vector difference, + architecture_nodes = this->arc_.get_all_nodes_vec(); + std::set_difference( + architecture_nodes.begin(), architecture_nodes.end(), placed.begin(), + placed.end(), std::inserter(difference, difference.begin())); + // should always be enough remaining qubits to assign unplaced qubits to + TKET_ASSERT(difference.size() >= n_placed); + for (unsigned i = 0; i < n_placed; i++) { + // naively assign each qubit to some free node + placement.insert({to_place[i], difference[i]}); + } + } + return {placement}; +} + qubit_mapping_t LinePlacement::get_placement_map(const Circuit &circ_) const { return get_all_placement_maps(circ_).at(0); } diff --git a/tket/src/Placement/include/Placement/NeighbourPlacements.hpp b/tket/src/Placement/include/Placement/NeighbourPlacements.hpp new file mode 100644 index 0000000000..236a64b79b --- /dev/null +++ b/tket/src/Placement/include/Placement/NeighbourPlacements.hpp @@ -0,0 +1,99 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
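A minimal usage sketch for the class implemented above; arc (an Architecture) and init_map (a qubit_mapping_t assigning each circuit Qubit to a distinct Node of arc) are assumed to exist and are not part of the patch.

  NeighbourPlacements np(arc, init_map);
  // Request three distinct placements, each (ideally) exactly two swaps away
  // from init_map; the remaining arguments keep their defaults
  // (optimise=true, seed=5489, max_tries=10).
  NeighbourPlacements::ResultVec results = np.get(/*dist=*/2, /*n=*/3);
  for (const NeighbourPlacements::Result& res : results) {
    // res.map is the new Qubit -> Node placement and res.swaps the sequence
    // of Node pairs swapped to reach it from init_map.
  }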
+ +#pragma once + +#include + +#include "Placement.hpp" +#include "TokenSwapping/SwapFunctions.hpp" +#include "Utils/BiMapHeaders.hpp" +#include "Utils/RNG.hpp" + +namespace tket { + +/** + * @brief Given a placement map generates `n` nearby placement maps. + * + * Based on an architecture and a placement map, generates random + * placements that can be achieved with `m` swaps. + * + * Optionally uses token swapping optimisations to try to ensure + * that the generated placements cannot be obtained in less than `m` + * swaps, but this cannot be guaranteed. + */ +class NeighbourPlacements { + public: + using SwapVec = std::vector; + using NodeSwap = std::pair; + using NodeSwapVec = std::vector; + struct Result { + qubit_mapping_t map; + NodeSwapVec swaps; + }; + using ResultVec = std::vector; + + /** + * @brief Construct a new Swap Placement object. + * + * @param arc The architecture defining the allowed swaps. + * @param init_map The initial Qubit => Node map. + */ + NeighbourPlacements(const Architecture& arc, const qubit_mapping_t& init_map); + + /** + * @brief Generate `n` distinct placement maps using `dist` swaps for each map + * + * The sequences of swaps are generated randomly. Note that it cannot be + * guaranteed that the generated placement cannot be obtained in less than + * `dist` swaps. When optimise=true (default), we attempt to simplify + * chains of swaps to make it more likely that `dist` swaps are indeed + * necessary for the generated placement maps. + * + * If optimise=true, it is also possible that placements `dist` swaps away + * do not exist. `max_tries` controls the number of attempts to generate + * placements. + * + * If it is impossible (or very hard) to generate `n` distinct placement maps + * of distance `dist` swaps away, then this method will raise a warning + * and return fewer results and/or results with less than `dist` swaps. + * + * @param dist The number of swaps allowed on the architecture. + * @param n The number of placement maps to generate (default n=1). + * @param optimise Simplify the generated swap sequences (default true). + * @param seed Seed for random number generator (default seed=5489). + * @param max_tries Number of tries before aborting placement map generation + * (default max_tries=10). + * @return ResultVec A vector of the generated maps and swaps + */ + ResultVec get( + unsigned dist, unsigned n = 1, bool optimise = true, unsigned seed = 5489, + unsigned max_tries = 10); + + private: + // generate a single Result + Result gen_result( + unsigned dist, bool optimise = true, unsigned max_tries = 10); + // generate a single swap + Swap gen_swap(); + // apply swap list to init_map and return new placement map + Result convert_to_res(const SwapVec& swaps); + + Architecture arc_; + qubit_mapping_t init_map_; + boost::bimap u_to_node_; + RNG rng_; +}; + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Placement/include/Placement/Placement.hpp b/tket/src/Placement/include/Placement/Placement.hpp index e567c0042b..ff2f2dd60c 100644 --- a/tket/src/Placement/include/Placement/Placement.hpp +++ b/tket/src/Placement/include/Placement/Placement.hpp @@ -269,6 +269,42 @@ class Placement { Architecture arc_; }; +/** + * NaivePlacement class provides methods for relabelling any + * Qubit objects in some Circuit to Node objects in some Architecture + * given the constraint that only Qubit that are not already labelled + * as some Node can be relabelled, and only to Architecture Node + * that are not already in the Circuit. 
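As a rough illustration of the behaviour this comment describes (arc, an Architecture, and circ, a partially placed Circuit, are assumed here and are not taken from the patch):

  NaivePlacement naive(arc);
  // Qubits already named after Nodes of arc keep their assignment; every
  // remaining Qubit is mapped to some architecture Node not yet in circ.
  qubit_mapping_t placement = naive.get_placement_map(circ);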
+ */ +class NaivePlacement : public Placement { + public: + /** + * @param _arc Architecture object later relabellings are produced for + */ + explicit NaivePlacement(const Architecture& _arc) { arc_ = _arc; } + /** + * Given some circuit, returns a map between Qubit which defines some + * relabelling of some Circuit qubits to Architecture qubits + * + * @param circ_ Circuit map relabelling is defined for + * + * @return Map defining relabelling for circuit Qubit objects + */ + qubit_mapping_t get_placement_map(const Circuit& circ_) const override; + + /** + * Given some circuit, returns a single map for relabelling + * in a vector. + * + * @param circ_ Circuit map relabelling is defined for + * + * @return Vector of a single Map defining relabelling for Circuit + * Qubit objects. + */ + std::vector get_all_placement_maps( + const Circuit& circ_) const override; +}; + class LinePlacement : public Placement { public: explicit LinePlacement(const Architecture& _arc) { arc_ = _arc; } diff --git a/tket/src/Predicates/CompilerPass.cpp b/tket/src/Predicates/CompilerPass.cpp index 654ca3ea0f..50218aeb84 100644 --- a/tket/src/Predicates/CompilerPass.cpp +++ b/tket/src/Predicates/CompilerPass.cpp @@ -438,6 +438,9 @@ void from_json(const nlohmann::json& j, PassPtr& pp) { } else if (passname == "PlacementPass") { pp = gen_placement_pass(content.at("placement").get()); + } else if (passname == "NaivePlacementPass") { + pp = gen_naive_placement_pass( + content.at("architecture").get()); } else if (passname == "RenameQubitsPass") { pp = gen_rename_qubits_pass( content.at("qubit_map").get>()); diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index 8aa558caf0..23ee444480 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -183,10 +183,34 @@ PassPtr gen_placement_pass(const PlacementPtr& placement_ptr) { return std::make_shared(precons, t, pc, j); } +PassPtr gen_naive_placement_pass(const Architecture& arc) { + Transform::Transformation trans = [=](Circuit& circ, + std::shared_ptr maps) { + NaivePlacement np(arc); + return np.place(circ, maps); + }; + Transform t = Transform(trans); + PredicatePtr n_qubit_pred = + std::make_shared(arc.n_nodes()); + + PredicatePtrMap precons{CompilationUnit::make_type_pair(n_qubit_pred)}; + PredicatePtr placement_pred = std::make_shared(arc); + PredicatePtrMap s_postcons{CompilationUnit::make_type_pair(placement_pred)}; + PostConditions pc{s_postcons, {}, Guarantee::Preserve}; + // record pass config + nlohmann::json j; + j["name"] = "NaivePlacementPass"; + j["architecture"] = arc; + return std::make_shared(precons, t, pc, j); +} + PassPtr gen_full_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, const std::vector& config) { - return gen_placement_pass(placement_ptr) >> gen_routing_pass(arc, config); + std::vector vpp = { + gen_placement_pass(placement_ptr), gen_routing_pass(arc, config), + gen_naive_placement_pass(arc)}; + return std::make_shared(vpp); } PassPtr gen_default_mapping_pass(const Architecture& arc, bool delay_measures) { diff --git a/tket/src/Predicates/include/Predicates/CompilerPass.hpp b/tket/src/Predicates/include/Predicates/CompilerPass.hpp index 89bef2e497..31bf9f0056 100644 --- a/tket/src/Predicates/include/Predicates/CompilerPass.hpp +++ b/tket/src/Predicates/include/Predicates/CompilerPass.hpp @@ -65,6 +65,13 @@ struct PostConditions { PredicatePtrMap specific_postcons_; PredicateClassGuarantees generic_postcons_; Guarantee 
default_postcon_; + PostConditions( + const PredicatePtrMap& specific_postcons = {}, + const PredicateClassGuarantees& generic_postcons = {}, + Guarantee default_postcon = Guarantee::Clear) + : specific_postcons_(specific_postcons), + generic_postcons_(generic_postcons), + default_postcon_(default_postcon) {} }; /** diff --git a/tket/src/Predicates/include/Predicates/PassGenerators.hpp b/tket/src/Predicates/include/Predicates/PassGenerators.hpp index bc204b4217..3bccaa3ea0 100644 --- a/tket/src/Predicates/include/Predicates/PassGenerators.hpp +++ b/tket/src/Predicates/include/Predicates/PassGenerators.hpp @@ -45,6 +45,8 @@ PassPtr gen_clifford_simp_pass(bool allow_swaps = true); PassPtr gen_rename_qubits_pass(const std::map& qm); PassPtr gen_placement_pass(const PlacementPtr& placement_ptr); + +PassPtr gen_naive_placement_pass(const Architecture& arc); /* This higher order function generates a Routing pass using the std::vector object */ PassPtr gen_full_mapping_pass( diff --git a/tket/src/Program/Program_iteration.cpp b/tket/src/Program/Program_iteration.cpp index 634c3429b9..af10b97eff 100644 --- a/tket/src/Program/Program_iteration.cpp +++ b/tket/src/Program/Program_iteration.cpp @@ -18,7 +18,8 @@ namespace tket { Program::BlockIterator::BlockIterator() - : current_vert_(boost::graph_traits::null_vertex()) {} + : prog_(nullptr), + current_vert_(boost::graph_traits::null_vertex()) {} Program::BlockIterator::BlockIterator(const Program &p) { FGVert first = p.get_successors(p.entry_).front(); diff --git a/tket/src/TokenSwappingWithArch/CMakeLists.txt b/tket/src/TokenSwappingWithArch/CMakeLists.txt deleted file mode 100644 index 96f84066be..0000000000 --- a/tket/src/TokenSwappingWithArch/CMakeLists.txt +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2019-2022 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
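The new gen_full_mapping_pass above now chains three passes rather than two. A rough equivalent written with the existing pass combinator, using the same parameters as the function (a sketch, not a quotation from the patch):

  PassPtr full_mapping =
      gen_placement_pass(placement_ptr) >>  // choose an initial placement
      gen_routing_pass(arc, config) >>      // route with the configured methods
      gen_naive_placement_pass(arc);        // place any qubits left unassigned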
- -project(tket-${COMP}) - -if (NOT ${COMP} STREQUAL "TokenSwappingWithArch") - message(FATAL_ERROR "Unexpected component name.") -endif() - -add_library(tket-${COMP} - ArchitectureMapping.cpp - BestTsaWithArch.cpp - DistancesFromArchitecture.cpp - NeighboursFromArchitecture.cpp - ) - -list(APPEND DEPS_${COMP} - Circuit - Architecture - Graphs - Ops - OpType - TokenSwapping - Utils) - -foreach(DEP ${DEPS_${COMP}}) - target_include_directories( - tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) - target_link_libraries( - tket-${COMP} PRIVATE tket-${DEP}) -endforeach() - -target_include_directories(tket-${COMP} - PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR} - ${TKET_${COMP}_INCLUDE_DIR} - ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) - -target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS}) diff --git a/tket/src/Transformations/ControlledGates.cpp b/tket/src/Transformations/ControlledGates.cpp index 73bd2e0f2c..27fa953eb4 100644 --- a/tket/src/Transformations/ControlledGates.cpp +++ b/tket/src/Transformations/ControlledGates.cpp @@ -21,6 +21,7 @@ #include "Circuit/CircPool.hpp" #include "Circuit/DAGDefs.hpp" +#include "OpType/OpType.hpp" #include "Transform.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/HelperFunctions.hpp" @@ -432,22 +433,25 @@ static Circuit lemma71( if (rep.n_gates() != correct_gate_count) throw ControlDecompError("Error in Lemma 7.1: Gate count is incorrect"); auto [vit, vend] = boost::vertices(rep.dag); + VertexSet bin; for (auto next = vit; vit != vend; vit = next) { ++next; Vertex v = *vit; - if (rep.get_OpType_from_Vertex(v) == OpType::CRy) { - Expr v_angle = rep.get_Op_ptr_from_Vertex(v)->get_params()[0]; - Circuit cry_replacement = CircPool::CRy_using_CX(v_angle); - Subcircuit sub{rep.get_in_edges(v), rep.get_all_out_edges(v), {v}}; - rep.substitute(cry_replacement, sub, Circuit::VertexDeletion::Yes); - } - if (rep.get_OpType_from_Vertex(v) == OpType::CU1) { - Expr v_angle = rep.get_Op_ptr_from_Vertex(v)->get_params()[0]; - Circuit cu1_replacement = CircPool::CU1_using_CX(v_angle); - Subcircuit sub{rep.get_in_edges(v), rep.get_all_out_edges(v), {v}}; - rep.substitute(cu1_replacement, sub, Circuit::VertexDeletion::Yes); + if (!bin.contains(v)) { + OpType optype = rep.get_OpType_from_Vertex(v); + if (optype == OpType::CRy || optype == OpType::CU1) { + Expr v_angle = rep.get_Op_ptr_from_Vertex(v)->get_params()[0]; + Circuit replacement = (optype == OpType::CRy) + ? 
CircPool::CRy_using_CX(v_angle) + : CircPool::CU1_using_CX(v_angle); + Subcircuit sub{rep.get_in_edges(v), rep.get_all_out_edges(v), {v}}; + rep.substitute(replacement, sub, Circuit::VertexDeletion::No); + bin.insert(v); + } } } + rep.remove_vertices( + bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); return rep; } diff --git a/tket/src/Transformations/Rebase.cpp b/tket/src/Transformations/Rebase.cpp index 8c75adc9d8..fedb1903f9 100644 --- a/tket/src/Transformations/Rebase.cpp +++ b/tket/src/Transformations/Rebase.cpp @@ -77,9 +77,7 @@ static bool standard_rebase( success = circ.substitute_all(cx_replacement, cx_op) | success; } BGL_FORALL_VERTICES(v, circ.dag, DAG) { - if (circ.n_in_edges_of_type(v, EdgeType::Quantum) != 1 || - circ.n_in_edges_of_type(v, EdgeType::Quantum) != 1) - continue; + if (circ.n_in_edges_of_type(v, EdgeType::Quantum) != 1) continue; Op_ptr op = circ.get_Op_ptr_from_Vertex(v); bool conditional = op->get_type() == OpType::Conditional; if (conditional) { diff --git a/tket/src/Utils/include/Utils/UnitID.hpp b/tket/src/Utils/include/Utils/UnitID.hpp index 7f70ac7c63..fd60c67452 100644 --- a/tket/src/Utils/include/Utils/UnitID.hpp +++ b/tket/src/Utils/include/Utils/UnitID.hpp @@ -90,6 +90,7 @@ class UnitID { if (n < 0) return true; return data_->index_ < other.data_->index_; } + bool operator>(const UnitID &other) const { return other < *this; } bool operator==(const UnitID &other) const { return (this->data_->name_ == other.data_->name_) && (this->data_->index_ == other.data_->index_); diff --git a/tket/src/ZX/CMakeLists.txt b/tket/src/ZX/CMakeLists.txt index 8021d15192..98d166404a 100644 --- a/tket/src/ZX/CMakeLists.txt +++ b/tket/src/ZX/CMakeLists.txt @@ -21,6 +21,7 @@ endif() add_library(tket-${COMP} ZXDConstructors.cpp ZXDExpansions.cpp + ZXDFormats.cpp ZXDGettersSetters.cpp ZXDManipulation.cpp ZXGenerator.cpp @@ -28,7 +29,8 @@ add_library(tket-${COMP} ZXRWAxioms.cpp ZXRWDecompositions.cpp ZXRWGraphLikeForm.cpp - ZXRWGraphLikeSimplification.cpp) + ZXRWGraphLikeSimplification.cpp + Flow.cpp) list(APPEND DEPS_${COMP} Utils) diff --git a/tket/src/ZX/Flow.cpp b/tket/src/ZX/Flow.cpp new file mode 100644 index 0000000000..e5561df993 --- /dev/null +++ b/tket/src/ZX/Flow.cpp @@ -0,0 +1,611 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
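A note on the design choice in the lemma71 rewrite above: vertices are no longer deleted while the boost vertex range is being traversed; instead each matched CRy or CU1 is substituted with VertexDeletion::No, collected in a bin, and all binned vertices are removed in one pass afterwards, presumably to keep the iteration safe. Reduced to a skeleton (a sketch of the pattern, using names from the diff, not new code from the patch):

  VertexSet bin;
  // ... while iterating rep.dag, for each matched vertex v:
  //       rep.substitute(replacement, sub, Circuit::VertexDeletion::No);
  //       bin.insert(v);
  rep.remove_vertices(
      bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes);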
+ +#include "ZX/Flow.hpp" + +#include "Utils/GraphHeaders.hpp" +#include "Utils/MatrixAnalysis.hpp" + +namespace tket { + +namespace zx { + +Flow::Flow( + const std::map& c, + const std::map& d) + : c_(c), d_(d) {} + +ZXVertSeqSet Flow::c(const ZXVert& v) const { return c_.at(v); } + +ZXVertSeqSet Flow::odd(const ZXVert& v, const ZXDiagram& diag) const { + sequenced_map_t parities; + ZXVertSeqSet cv = c(v); + for (const ZXVert& u : cv.get()) { + for (const ZXVert& n : diag.neighbours(u)) { + sequenced_map_t::iterator found = + parities.get().find(n); + if (found == parities.get().end()) { + parities.insert({n, 1}); + } else { + parities.replace(found, {n, found->second + 1}); + } + } + } + ZXVertSeqSet odds; + for (const std::pair& p : parities.get()) { + if (p.second % 2 == 1) { + odds.insert(p.first); + } + } + return odds; +} + +unsigned Flow::d(const ZXVert& v) const { return d_.at(v); } + +void Flow::verify(const ZXDiagram& diag) const { + if (!diag.is_MBQC()) + throw ZXError("Verifying a flow for a diagram that is not in MBQC form"); + BGL_FORALL_VERTICES(u, *diag.graph, ZXGraph) { + ZXType type = diag.get_zxtype(u); + if (is_boundary_type(type)) continue; + ZXVertSeqSet uc = c(u); + ZXVertSeqSet uodd = odd(u, diag); + for (const ZXVert& v : uc.get()) { + ZXType vt = diag.get_zxtype(v); + if (u != v && vt != ZXType::PX && vt != ZXType::PY && d(u) <= d(v)) + throw ZXError("A qubit has an X correction in its past"); + if (u != v && vt == ZXType::PY && d(u) <= d(v) && + uodd.find(v) == uodd.end()) + throw ZXError("A past Y vertex receives an X correction"); + } + for (const ZXVert& v : uodd.get()) { + ZXType vt = diag.get_zxtype(v); + if (u != v && vt != ZXType::PY && vt != ZXType::PZ && d(u) <= d(v)) + throw ZXError("A qubit has a Z correction in its past"); + if (u != v && vt == ZXType::PY && d(u) <= d(v) && uc.find(v) == uc.end()) + throw ZXError("A past Y vertex receives a Z correction"); + } + bool self_x = (uc.find(u) != uc.end()); + bool self_z = (uodd.find(u) != uodd.end()); + switch (type) { + case ZXType::XY: { + if (self_x || !self_z) + throw ZXError("XY vertex must be corrected with a Z"); + break; + } + case ZXType::XZ: { + if (!self_x || !self_z) + throw ZXError("XZ vertex must be corrected with a Y"); + break; + } + case ZXType::YZ: { + if (!self_x || self_z) + throw ZXError("YZ vertex must be corrected with an X"); + break; + } + case ZXType::PX: { + if (!self_z) throw ZXError("PX vertex must be corrected with a Y or Z"); + break; + } + case ZXType::PY: { + if (self_x == self_z) + throw ZXError("PY vertex must be corrected with an X or Z"); + break; + } + case ZXType::PZ: { + if (!self_x) + throw ZXError("PZ vertex must be corrected with an X or Y"); + break; + } + default: + throw ZXError("Invalid ZXType for MBQC diagram"); + } + } +} + +void Flow::focus(const ZXDiagram& diag) { + std::map order; + for (const std::pair& p : d_) { + auto found = order.find(p.second); + if (found == order.end()) + order.insert({p.second, {p.first}}); + else + found->second.push_back(p.first); + } + + for (const std::pair& p : order) { + for (const ZXVert& u : p.second) { + if (diag.get_zxtype(u) == ZXType::Output) continue; + ZXVertSeqSet uc = c(u); + ZXVertSeqSet uodd = odd(u, diag); + sequenced_map_t parities; + for (const ZXVert& v : uc.get()) parities.insert({v, 1}); + for (const ZXVert& v : uc.get()) { + if (v == u) continue; + ZXType vtype = diag.get_zxtype(v); + if ((vtype != ZXType::Output && vtype != ZXType::XY && + vtype != ZXType::PX && vtype != ZXType::PY) || + (vtype == 
ZXType::PY && uodd.find(v) == uodd.end())) { + ZXVertSeqSet cv = c(v); + for (const ZXVert& w : cv.get()) { + auto found = parities.get().find(w); + if (found == parities.get().end()) + parities.insert({w, 1}); + else + parities.replace(found, {w, found->second + 1}); + } + } + } + for (const ZXVert& v : uodd.get()) { + if (v == u) continue; + ZXType vtype = diag.get_zxtype(v); + if ((vtype != ZXType::Output && vtype != ZXType::XZ && + vtype != ZXType::YZ && vtype != ZXType::PY && + vtype != ZXType::PZ) || + (vtype == ZXType::PY && uc.find(v) == uc.end())) { + ZXVertSeqSet cv = c(v); + for (const ZXVert& w : cv.get()) { + auto found = parities.get().find(w); + if (found == parities.get().end()) + parities.insert({w, 1}); + else + parities.replace(found, {w, found->second + 1}); + } + } + } + ZXVertSeqSet new_c; + for (const std::pair p : parities.get()) { + if (p.second % 2 == 1) new_c.insert(p.first); + } + c_.at(u) = new_c; + } + } +} + +Flow Flow::identify_causal_flow(const ZXDiagram& diag) { + // Check diagram has the expected form for causal flow + if (!diag.is_MBQC()) + throw ZXError("ZXDiagram must be in MBQC form to identify causal flow"); + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + ZXType vtype = diag.get_zxtype(v); + if (!is_boundary_type(vtype) && vtype != ZXType::XY) + throw ZXError("Causal flow is only defined when all vertices are XY"); + } + + // solved contains all vertices for which we have found corrections + ZXVertSeqSet solved; + // correctors are those vertices that have been solved but are not yet + // fl.c(u) for some u + ZXVertSeqSet correctors; + // past[v] is undefined if v is not yet solved + // past[v] is the number of neighbours of v that are still unsolved + // When past[v] drops to 1, we can correct the unsolved vertex using an X on + // v and Z on all of its other neighbours + std::map past; + Flow fl{{}, {}}; + + // Outputs are trivially solved + for (const ZXVert& o : diag.get_boundary(ZXType::Output)) { + // MBQC form of ZX Diagrams requires each output to have a unique Hadamard + // edge to another vertex + past[o] = 1; + solved.insert(o); + fl.c_.insert({o, {}}); + fl.d_.insert({o, 0}); + // All outputs have been extended so are either non-inputs or disconnected + // from any other vertices, so safe to add to correctors + correctors.insert(o); + } + + unsigned depth = 1; + + do { + ZXVertSeqSet new_correctors; + for (const ZXVert& v : correctors.get()) { + // Determine whether |N(v) cap unsolved| == 1 to find u + ZXVert u; + unsigned n_found = 0; + for (const ZXVert& vn : diag.neighbours(v)) { + if (solved.find(vn) == solved.end()) { + u = vn; + ++n_found; + } + } + if (n_found != 1) continue; + + // Can correct u by firing stabilizer of v + fl.c_.insert({u, {v}}); + fl.d_.insert({u, depth}); + solved.insert(u); + + // Determine any new correctors + n_found = 0; + bool in = false; + for (const ZXVert& un : diag.neighbours(u)) { + if (diag.get_zxtype(un) == ZXType::Input) { + in = true; + solved.insert(un); + continue; + } + if (solved.find(un) == solved.end()) { + ++n_found; + } + // Another neighbour of un has been solved, so check if it can now + // correct something + auto it = past.find(un); + if (it != past.end() && it->second > 0) { + --it->second; + if (it->second == 1) new_correctors.insert(un); + } + } + // u is a new corrector if u notin I and |N(u) cap unsolved| == 1 + if (!in) { + past.insert({u, n_found}); + if (n_found == 1) new_correctors.insert(u); + } + } + correctors = new_correctors; + ++depth; + } while (!correctors.empty()); 
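A worked micro-example of the correction loop above, on a hypothetical diagram that is not part of the patch:

  // Smallest MBQC path  i -- u -- o  (Input i, XY-measured u, Output o):
  //   start:  d(o) = 0, c(o) = {}; o is the only corrector.
  //   pass 1: u is o's only unsolved neighbour, so u is corrected by firing
  //           o's stabilizer: c(u) = {o}, d(u) = 1.
  //   odd(u) = {u} (the odd neighbourhood of {o}), so u receives the Z
  //           correction an XY-measured vertex needs and verify() accepts.
  //   u's remaining neighbour is the input i, which needs no correction, so
  //   the loop ends with every vertex solved.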
+ if (solved.size() != diag.n_vertices()) + throw ZXError("ZXDiagram does not have causal flow"); + return fl; +} + +std::map Flow::gauss_solve_correctors( + const ZXDiagram& diag, const boost::bimap& correctors, + const boost::bimap& preserve, const ZXVertVec& to_solve, + const boost::bimap& ys) { + unsigned n_correctors = correctors.size(); + unsigned n_preserve = preserve.size(); + unsigned n_to_solve = to_solve.size(); + unsigned n_ys = ys.size(); + MatrixXb mat = MatrixXb::Zero(n_preserve + n_ys, n_correctors + n_to_solve); + // Build adjacency matrix + for (boost::bimap::const_iterator it = correctors.begin(), + end = correctors.end(); + it != end; ++it) { + for (const ZXVert& n : diag.neighbours(it->left)) { + auto in_past = preserve.left.find(n); + if (in_past != preserve.left.end()) { + mat(in_past->second, it->right) = true; + } else { + auto in_ys = ys.left.find(n); + if (in_ys != ys.left.end()) { + mat(n_preserve + in_ys->second, it->right) = true; + } + } + } + } + for (boost::bimap::const_iterator it = ys.begin(), + end = ys.end(); + it != end; ++it) { + auto found = correctors.left.find(it->left); + if (found != correctors.left.end()) + mat(n_preserve + it->right, found->second) = true; + } + // Add rhs + for (unsigned i = 0; i < n_to_solve; ++i) { + ZXVert v = to_solve.at(i); + switch (diag.get_zxtype(v)) { + case ZXType::XY: + case ZXType::PX: { + mat(preserve.left.at(v), n_correctors + i) = true; + break; + } + case ZXType::XZ: { + mat(preserve.left.at(v), n_correctors + i) = true; + } + // fall through + case ZXType::YZ: + case ZXType::PZ: { + for (const ZXVert& n : diag.neighbours(v)) { + auto found = preserve.left.find(n); + if (found != preserve.left.end()) + mat(found->second, n_correctors + i) = true; + else { + found = ys.left.find(n); + if (found != ys.left.end()) + mat(n_preserve + found->second, n_correctors + i) = true; + } + } + break; + } + case ZXType::PY: { + mat(n_preserve + ys.left.at(v), n_correctors + i) = true; + break; + } + default: { + throw ZXError( + "Internal error in flow identification: non-MBQC vertex found"); + } + } + } + + // Gaussian elimination + std::vector> row_ops = + gaussian_elimination_row_ops( + mat.block(0, 0, n_preserve + n_ys, n_correctors)); + for (const std::pair& op : row_ops) { + for (unsigned j = 0; j < n_correctors + n_to_solve; ++j) { + mat(op.second, j) ^= mat(op.first, j); + } + } + + // Back substitution + // For each row i, pick a corrector j for which mat(i,j) == true, else + // determine that row i has zero lhs + std::map row_corrector; + for (unsigned i = 0; i < n_preserve + n_ys; ++i) { + for (unsigned j = 0; j < n_correctors; ++j) { + if (mat(i, j)) { + row_corrector.insert({i, correctors.right.at(j)}); + break; + } + } + } + // For each past i, scan down column of rhs and for each mat(j,CI+i) == true, + // add corrector from row j or try next i if row j has zero lhs + std::map solved_flow; + for (unsigned i = 0; i < n_to_solve; ++i) { + bool fail = false; + ZXVertSeqSet c_i; + for (unsigned j = 0; j < n_preserve + n_ys; ++j) { + if (mat(j, n_correctors + i)) { + auto found = row_corrector.find(j); + if (found == row_corrector.end()) { + fail = true; + break; + } else { + c_i.insert(found->second); + } + } + } + if (!fail) { + ZXVert v = to_solve.at(i); + ZXType vt = diag.get_zxtype(v); + if (vt == ZXType::XZ || vt == ZXType::YZ || vt == ZXType::PZ) + c_i.insert(v); + solved_flow.insert({v, c_i}); + } + } + return solved_flow; +} + +Flow Flow::identify_pauli_flow(const ZXDiagram& diag) { + // Check diagram has 
the expected form for pauli flow + if (!diag.is_MBQC()) + throw ZXError("ZXDiagram must be in MBQC form to identify Pauli flow"); + + ZXVertSeqSet solved; + std::set inputs; + Flow fl{{}, {}}; + + // Tag input measurements + for (const ZXVert& i : diag.get_boundary(ZXType::Input)) { + ZXVert ni = diag.neighbours(i).at(0); + inputs.insert(ni); + ZXType nt = diag.get_zxtype(ni); + if (nt == ZXType::XZ || nt == ZXType::YZ || nt == ZXType::PY) + throw ZXError( + "Inputs measured in XZ, YZ, or Y cannot be corrected with Pauli " + "flow"); + } + + // Indexing of correctors in binary matrix can be preserved between rounds as + // we will only ever add new correctors + boost::bimap correctors; + unsigned corrector_i = 0; + + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + switch (diag.get_zxtype(v)) { + case ZXType::Output: { + // Outputs are trivially solved + solved.insert(v); + fl.c_.insert({v, {}}); + fl.d_.insert({v, 0}); + // Cannot use inputs to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, corrector_i}); + ++corrector_i; + } + break; + } + case ZXType::PX: + case ZXType::PY: { + // Can use non-input Xs and Ys to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, corrector_i}); + ++corrector_i; + } + break; + } + default: + break; + } + } + + unsigned depth = 1; + + unsigned n_solved = 0; + do { + // Construct Gaussian elimination problem + boost::bimap preserve; + boost::bimap unsolved_ys; + ZXVertVec to_solve; + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + ZXType type = diag.get_zxtype(v); + if (solved.get().find(v) == solved.get().end() && + type != ZXType::Input) { + to_solve.push_back(v); + if (type == ZXType::PY) + unsolved_ys.insert({v, (unsigned)unsolved_ys.size()}); + else if (type != ZXType::PZ) + preserve.insert({v, (unsigned)preserve.size()}); + } + } + + std::map new_corrections = gauss_solve_correctors( + diag, correctors, preserve, to_solve, unsolved_ys); + + n_solved = new_corrections.size(); + + for (const std::pair& nc : new_corrections) { + fl.c_.insert(nc); + fl.d_.insert({nc.first, depth}); + solved.insert(nc.first); + if (inputs.find(nc.first) == inputs.end()) + correctors.insert({nc.first, (unsigned)correctors.size()}); + } + + ++depth; + } while (n_solved != 0); + + if (solved.size() + inputs.size() != diag.n_vertices()) + throw ZXError("ZXDiagram does not have pauli flow"); + + return fl; +} + +std::set Flow::identify_focussed_sets(const ZXDiagram& diag) { + // Check diagram has the expected form for pauli flow + if (!diag.is_MBQC()) + throw ZXError("ZXDiagram must be in MBQC form to identify gflow"); + + std::set inputs; + + // Tag input measurements + for (const ZXVert& i : diag.get_boundary(ZXType::Input)) { + ZXVert ni = diag.neighbours(i).at(0); + inputs.insert(ni); + ZXType nt = diag.get_zxtype(ni); + if (nt == ZXType::XZ || nt == ZXType::YZ || nt == ZXType::PY) + throw ZXError( + "Inputs measured in XZ, YZ, or Y cannot be corrected with Pauli " + "flow"); + } + + // Build Gaussian elimination problem + boost::bimap correctors; + boost::bimap preserve; + boost::bimap ys; + unsigned n_correctors = 0; + unsigned n_preserve = 0; + unsigned n_ys = 0; + + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + switch (diag.get_zxtype(v)) { + case ZXType::Output: { + // Cannot use inputs to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, n_correctors}); + ++n_correctors; + } + break; + } + case ZXType::XY: + case ZXType::PX: { + preserve.insert({v, n_preserve}); + ++n_preserve; + // Can use 
non-input Xs and Ys to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, n_correctors}); + ++n_correctors; + } + break; + } + case ZXType::PY: { + ys.insert({v, n_ys}); + ++n_ys; + // Can use non-input Xs and Ys to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, n_correctors}); + ++n_correctors; + } + break; + } + default: + break; + } + } + + MatrixXb mat = MatrixXb::Zero(n_preserve + n_ys, n_correctors); + + // Build adjacency matrix + for (boost::bimap::const_iterator it = correctors.begin(), + end = correctors.end(); + it != end; ++it) { + for (const ZXVert& n : diag.neighbours(it->left)) { + auto in_preserve = preserve.left.find(n); + if (in_preserve != preserve.left.end()) { + mat(in_preserve->second, it->right) = true; + } else { + auto in_ys = ys.left.find(n); + if (in_ys != ys.left.end()) { + mat(n_preserve + in_ys->second, it->right) = true; + } + } + } + } + for (boost::bimap::const_iterator it = ys.begin(), + end = ys.end(); + it != end; ++it) { + auto found = correctors.left.find(it->left); + if (found != correctors.left.end()) + mat(n_preserve + it->right, found->second) = true; + } + + // Gaussian elimination + std::vector> row_ops = + gaussian_elimination_row_ops(mat); + for (const std::pair& op : row_ops) { + for (unsigned j = 0; j < n_correctors; ++j) { + mat(op.second, j) ^= mat(op.first, j); + } + } + + // Back substitution + // For each column j, it either a leading column (the first column for which + // mat(i,j) == true for a given i, so set row_corrector[i] = j; by Gaussian + // Elimination this is the only entry in the column) or it describes the + // focussed set generator {j} + {row_corrector[i] | mat(i,j) == true} + std::set focussed; + std::map row_corrector; + for (boost::bimap::const_iterator it = correctors.begin(), + end = correctors.end(); + it != end; ++it) { + ZXVertSeqSet fset{it->left}; + bool new_row_corrector = false; + for (unsigned i = 0; i < n_preserve + n_ys; ++i) { + if (mat(i, it->right)) { + auto inserted = row_corrector.insert({i, it->left}); + if (inserted.second) { + // New row_corrector, so move to next column + new_row_corrector = true; + break; + } else { + // Non-correcting column + fset.insert(inserted.first->second); + } + } + } + if (!new_row_corrector) focussed.insert({fset}); + } + + return focussed; +} + +} // namespace zx + +} // namespace tket diff --git a/tket/src/ZX/ZXDExpansions.cpp b/tket/src/ZX/ZXDExpansions.cpp index c9f8fa164f..20578fa8ba 100644 --- a/tket/src/ZX/ZXDExpansions.cpp +++ b/tket/src/ZX/ZXDExpansions.cpp @@ -62,19 +62,44 @@ ZXDiagram ZXDiagram::to_doubled_diagram() const { break; } case ZXType::ZSpider: - case ZXType::XSpider: { - const BasicGen& bg = static_cast(*op); - orig_op = std::make_shared( + case ZXType::XSpider: + case ZXType::XY: + case ZXType::YZ: { + const PhasedGen& bg = static_cast(*op); + orig_op = std::make_shared( op->get_type(), bg.get_param(), QuantumType::Classical); - conj_op = std::make_shared( + conj_op = std::make_shared( op->get_type(), -bg.get_param(), QuantumType::Classical); break; } + case ZXType::XZ: { + const PhasedGen& bg = static_cast(*op); + orig_op = std::make_shared( + op->get_type(), bg.get_param(), QuantumType::Classical); + conj_op = orig_op; + break; + } + case ZXType::PX: + case ZXType::PZ: { + const CliffordGen& bg = static_cast(*op); + orig_op = std::make_shared( + op->get_type(), bg.get_param(), QuantumType::Classical); + conj_op = orig_op; + break; + } + case ZXType::PY: { + const CliffordGen& bg = static_cast(*op); + 
orig_op = std::make_shared( + op->get_type(), bg.get_param(), QuantumType::Classical); + conj_op = std::make_shared( + op->get_type(), !bg.get_param(), QuantumType::Classical); + break; + } case ZXType::Hbox: { - const BasicGen& bg = static_cast(*op); - orig_op = std::make_shared( + const PhasedGen& bg = static_cast(*op); + orig_op = std::make_shared( op->get_type(), bg.get_param(), QuantumType::Classical); - conj_op = std::make_shared( + conj_op = std::make_shared( op->get_type(), SymEngine::conjugate(bg.get_param()), QuantumType::Classical); break; @@ -174,7 +199,7 @@ ZXDiagram ZXDiagram::to_quantum_embedding() const { if (embedding.get_qtype(b) == QuantumType::Classical) { ZXVert new_b = embedding.add_vertex(embedding.get_zxtype(b), QuantumType::Quantum); - ZXGen_ptr id = std::make_shared( + ZXGen_ptr id = std::make_shared( ZXType::ZSpider, 0., QuantumType::Classical); embedding.set_vertex_ZXGen_ptr(b, id); embedding.add_wire(new_b, b, ZXWireType::Basic, QuantumType::Quantum); diff --git a/tket/src/ZX/ZXDFormats.cpp b/tket/src/ZX/ZXDFormats.cpp new file mode 100644 index 0000000000..5ce8455cb4 --- /dev/null +++ b/tket/src/ZX/ZXDFormats.cpp @@ -0,0 +1,59 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
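Because the template arguments of the make_shared calls are stripped in this listing, the conjugation rules applied by to_doubled_diagram above are easier to read in summary form. This restates the switch; it adds no behaviour:

  ZSpider / XSpider / XY / YZ  (PhasedGen, phase a)   ->  conjugate phase  -a
  XZ                           (PhasedGen, phase a)   ->  conjugate phase   a
  PX / PZ                      (CliffordGen, bool b)  ->  conjugate param   b
  PY                           (CliffordGen, bool b)  ->  conjugate param  !b
  Hbox                         (PhasedGen, param h)   ->  conjugate param  conj(h)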
+ +#include "Utils/GraphHeaders.hpp" +#include "ZX/ZXDiagram.hpp" + +namespace tket { + +namespace zx { + +bool ZXDiagram::is_graphlike() const { + BGL_FORALL_EDGES(w, *graph, ZXGraph) { + if (is_boundary_type(get_zxtype(source(w))) || + is_boundary_type(get_zxtype(target(w)))) { + if (get_wire_type(w) != ZXWireType::Basic) return false; + } else { + if (get_wire_type(w) != ZXWireType::H) return false; + } + } + BGL_FORALL_VERTICES(v, *graph, ZXGraph) { + ZXType type = get_zxtype(v); + if (type != ZXType::ZSpider && !is_boundary_type(type)) return false; + } + return true; +} + +bool ZXDiagram::is_MBQC() const { + BGL_FORALL_EDGES(w, *graph, ZXGraph) { + if (get_qtype(w) != QuantumType::Quantum) return false; + if (get_zxtype(source(w)) == ZXType::Input || + get_zxtype(target(w)) == ZXType::Input) { + if (get_wire_type(w) != ZXWireType::Basic) return false; + } else { + if (get_wire_type(w) != ZXWireType::H) return false; + } + } + BGL_FORALL_VERTICES(v, *graph, ZXGraph) { + ZXType type = get_zxtype(v); + if (!is_MBQC_type(type) && type != ZXType::Input && type != ZXType::Output) + return false; + if (get_qtype(v) != QuantumType::Quantum) return false; + } + return true; +} + +} // namespace zx + +} // namespace tket diff --git a/tket/src/ZX/ZXDGettersSetters.cpp b/tket/src/ZX/ZXDGettersSetters.cpp index baa089fd2a..1356fa6c06 100644 --- a/tket/src/ZX/ZXDGettersSetters.cpp +++ b/tket/src/ZX/ZXDGettersSetters.cpp @@ -213,7 +213,7 @@ void ZXDiagram::set_wire_type(const Wire& w, ZXWireType type) { bool ZXDiagram::is_pauli_spider(const ZXVert& v) const { ZXGen_ptr op = get_vertex_ZXGen_ptr(v); if (!is_spider_type(op->get_type())) return false; - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); std::optional pi2_mult = equiv_Clifford(bg.get_param()); return (pi2_mult && ((*pi2_mult % 2) == 0)); } @@ -221,7 +221,7 @@ bool ZXDiagram::is_pauli_spider(const ZXVert& v) const { bool ZXDiagram::is_proper_clifford_spider(const ZXVert& v) const { ZXGen_ptr op = get_vertex_ZXGen_ptr(v); if (!is_spider_type(op->get_type())) return false; - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); std::optional pi2_mult = equiv_Clifford(bg.get_param()); return (pi2_mult && ((*pi2_mult % 2) == 1)); } @@ -249,7 +249,7 @@ static std::string graphviz_vertex_props(ZXGen_ptr op) { } case ZXType::ZSpider: case ZXType::XSpider: { - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); Expr p = bg.get_param(); std::string colour = (type == ZXType::ZSpider) ? 
"green" : "red"; ss << "fillcolor=\"" << colour << "\" shape=circle label=\""; @@ -258,7 +258,7 @@ static std::string graphviz_vertex_props(ZXGen_ptr op) { break; } case ZXType::Hbox: { - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); Expr p = bg.get_param(); std::optional ev = eval_expr_c(p); ss << "fillcolor=\"gold\" shape=square label=\""; @@ -266,6 +266,15 @@ static std::string graphviz_vertex_props(ZXGen_ptr op) { ss << "\""; break; } + case ZXType::XY: + case ZXType::XZ: + case ZXType::YZ: + case ZXType::PX: + case ZXType::PY: + case ZXType::PZ: { + ss << "shape=point label=\"" << op->get_name() << "\""; + break; + } case ZXType::Triangle: { ss << "fillcolor=\"gold\" shape=triangle"; break; diff --git a/tket/src/ZX/ZXGenerator.cpp b/tket/src/ZX/ZXGenerator.cpp index 9777f1df99..284bf52dd1 100644 --- a/tket/src/ZX/ZXGenerator.cpp +++ b/tket/src/ZX/ZXGenerator.cpp @@ -39,7 +39,8 @@ bool is_boundary_type(ZXType type) { bool is_basic_gen_type(ZXType type) { static const ZXTypeSet basics = { - ZXType::ZSpider, ZXType::XSpider, ZXType::Hbox}; + ZXType::ZSpider, ZXType::XSpider, ZXType::Hbox, ZXType::XY, ZXType::XZ, + ZXType::YZ, ZXType::PX, ZXType::PY, ZXType::PZ}; return find_in_set(type, basics); } @@ -53,6 +54,24 @@ bool is_directed_type(ZXType type) { return find_in_set(type, directed); } +bool is_MBQC_type(ZXType type) { + static const ZXTypeSet MBQC = {ZXType::XY, ZXType::XZ, ZXType::YZ, + ZXType::PX, ZXType::PY, ZXType::PZ}; + return find_in_set(type, MBQC); +} + +bool is_phase_type(ZXType type) { + static const ZXTypeSet phases = {ZXType::ZSpider, ZXType::XSpider, + ZXType::Hbox, ZXType::XY, + ZXType::XZ, ZXType::YZ}; + return find_in_set(type, phases); +} + +bool is_Clifford_gen_type(ZXType type) { + static const ZXTypeSet cliffords = {ZXType::PX, ZXType::PY, ZXType::PZ}; + return find_in_set(type, cliffords); +} + /** * ZXGen (Base class) implementation */ @@ -77,12 +96,21 @@ ZXGen_ptr ZXGen::create_gen(ZXType type, QuantumType qtype) { break; } case ZXType::ZSpider: - case ZXType::XSpider: { - op = std::make_shared(type, 0., qtype); + case ZXType::XSpider: + case ZXType::XY: + case ZXType::XZ: + case ZXType::YZ: { + op = std::make_shared(type, 0., qtype); break; } case ZXType::Hbox: { - op = std::make_shared(type, -1., qtype); + op = std::make_shared(type, -1., qtype); + break; + } + case ZXType::PX: + case ZXType::PY: + case ZXType::PZ: { + op = std::make_shared(type, false, qtype); break; } case ZXType::Triangle: { @@ -99,12 +127,29 @@ ZXGen_ptr ZXGen::create_gen(ZXType type, const Expr& param, QuantumType qtype) { ZXGen_ptr op; switch (type) { case ZXType::ZSpider: - case ZXType::XSpider: { - op = std::make_shared(type, param, qtype); + case ZXType::XSpider: + case ZXType::XY: + case ZXType::XZ: + case ZXType::YZ: + case ZXType::Hbox: { + op = std::make_shared(type, param, qtype); break; } - case ZXType::Hbox: { - op = std::make_shared(type, param, qtype); + default: + throw ZXError( + "Cannot instantiate a parameterised ZXGen of the required " + "type"); + } + return op; +} + +ZXGen_ptr ZXGen::create_gen(ZXType type, bool param, QuantumType qtype) { + ZXGen_ptr op; + switch (type) { + case ZXType::PX: + case ZXType::PY: + case ZXType::PZ: { + op = std::make_shared(type, param, qtype); break; } default: @@ -173,8 +218,8 @@ bool BoundaryGen::operator==(const ZXGen& other) const { * BasicGen implementation */ -BasicGen::BasicGen(ZXType type, const Expr& param, QuantumType qtype) - : ZXGen(type), qtype_(qtype), param_(param) { 
+BasicGen::BasicGen(ZXType type, QuantumType qtype) + : ZXGen(type), qtype_(qtype) { if (!is_basic_gen_type(type)) { throw ZXError("Unsupported ZXType for BasicGen"); } @@ -188,16 +233,32 @@ bool BasicGen::valid_edge( this->qtype_ == QuantumType::Classical); } -Expr BasicGen::get_param() const { return param_; } +bool BasicGen::operator==(const ZXGen& other) const { + if (!ZXGen::operator==(other)) return false; + const BasicGen& other_basic = static_cast(other); + return this->qtype_ == other_basic.qtype_; +} -SymSet BasicGen::free_symbols() const { return expr_free_symbols(param_); } +/** + * PhasedGen implementation + */ +PhasedGen::PhasedGen(ZXType type, const Expr& param, QuantumType qtype) + : BasicGen(type, qtype), param_(param) { + if (!is_phase_type(type)) { + throw ZXError("Unsupported ZXType for PhasedGen"); + } +} -ZXGen_ptr BasicGen::symbol_substitution( +Expr PhasedGen::get_param() const { return param_; } + +SymSet PhasedGen::free_symbols() const { return expr_free_symbols(param_); } + +ZXGen_ptr PhasedGen::symbol_substitution( const SymEngine::map_basic_basic& sub_map) const { - return std::make_shared(type_, param_.subs(sub_map), qtype_); + return std::make_shared(type_, param_.subs(sub_map), qtype_); } -std::string BasicGen::get_name(bool) const { +std::string PhasedGen::get_name(bool) const { std::stringstream st; if (qtype_ == QuantumType::Quantum) { st << "Q-"; @@ -214,18 +275,75 @@ std::string BasicGen::get_name(bool) const { case ZXType::Hbox: st << "H"; break; + case ZXType::XY: + st << "XY"; + break; + case ZXType::XZ: + st << "XZ"; + break; + case ZXType::YZ: + st << "YZ"; + break; default: - throw ZXError("BasicGen with invalid ZXType"); + throw ZXError("PhasedGen with invalid ZXType"); } st << "(" << param_ << ")"; return st.str(); } -bool BasicGen::operator==(const ZXGen& other) const { - if (!ZXGen::operator==(other)) return false; - const BasicGen& other_basic = static_cast(other); - return ( - this->qtype_ == other_basic.qtype_ && this->param_ == other_basic.param_); +bool PhasedGen::operator==(const ZXGen& other) const { + if (!BasicGen::operator==(other)) return false; + const PhasedGen& other_basic = static_cast(other); + return this->param_ == other_basic.param_; +} + +/** + * CliffordGen implementation + */ +CliffordGen::CliffordGen(ZXType type, bool param, QuantumType qtype) + : BasicGen(type, qtype), param_(param) { + if (!is_Clifford_gen_type(type)) { + throw ZXError("Unsupported ZXType for CliffordGen"); + } +} + +bool CliffordGen::get_param() const { return param_; } + +SymSet CliffordGen::free_symbols() const { return {}; } + +ZXGen_ptr CliffordGen::symbol_substitution( + const SymEngine::map_basic_basic&) const { + return ZXGen_ptr(); +} + +std::string CliffordGen::get_name(bool) const { + std::stringstream st; + if (qtype_ == QuantumType::Quantum) { + st << "Q-"; + } else { + st << "C-"; + } + switch (type_) { + case ZXType::PX: + st << "X"; + break; + case ZXType::PY: + st << "Y"; + break; + case ZXType::PZ: + st << "Z"; + break; + default: + throw ZXError("CliffordGen with invalid ZXType"); + } + st << "(" << param_ << ")"; + return st.str(); +} + +bool CliffordGen::operator==(const ZXGen& other) const { + if (!BasicGen::operator==(other)) return false; + const CliffordGen& other_basic = static_cast(other); + return this->param_ == other_basic.param_; } /** diff --git a/tket/src/ZX/ZXRWAxioms.cpp b/tket/src/ZX/ZXRWAxioms.cpp index e285ea06fa..6f92633b5e 100644 --- a/tket/src/ZX/ZXRWAxioms.cpp +++ b/tket/src/ZX/ZXRWAxioms.cpp @@ -14,6 +14,7 @@ 
#include "Utils/GraphHeaders.hpp" #include "ZX/Rewrite.hpp" +#include "ZXDiagramImpl.hpp" namespace tket { @@ -32,8 +33,8 @@ bool Rewrite::red_to_green_fun(ZXDiagram& diag) { : ZXWireType::H; } // Replace X spider with Z spider - const BasicGen& x = diag.get_vertex_ZXGen(v); - ZXGen_ptr z = std::make_shared( + const PhasedGen& x = diag.get_vertex_ZXGen(v); + ZXGen_ptr z = std::make_shared( ZXType::ZSpider, x.get_param(), *x.get_qtype()); diag.set_vertex_ZXGen_ptr(v, z); } @@ -44,7 +45,9 @@ Rewrite Rewrite::red_to_green() { return Rewrite(red_to_green_fun); } bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { bool success = false; + std::set bin; BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + if (bin.contains(v)) continue; ZXType vtype = diag.get_zxtype(v); if (!is_spider_type(vtype)) continue; /** @@ -60,6 +63,7 @@ bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { adj_list.pop_front(); ZXWireType wtype = diag.get_wire_type(w); ZXVert u = diag.other_end(w, v); + if (bin.contains(u)) continue; ZXType utype = diag.get_zxtype(u); bool same_colour = vtype == utype; if (!is_spider_type(utype) || u == v || @@ -68,9 +72,9 @@ bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { // The spiders `u` and `v` can be fused together // We merge into `v` and remove `u` so that we can efficiently continue to // search the neighbours - const BasicGen& vspid = diag.get_vertex_ZXGen(v); - const BasicGen& uspid = diag.get_vertex_ZXGen(u); - ZXGen_ptr new_spid = std::make_shared( + const PhasedGen& vspid = diag.get_vertex_ZXGen(v); + const PhasedGen& uspid = diag.get_vertex_ZXGen(u); + ZXGen_ptr new_spid = std::make_shared( vtype, vspid.get_param() + uspid.get_param(), (vspid.get_qtype() == QuantumType::Classical || uspid.get_qtype() == QuantumType::Classical) @@ -102,10 +106,13 @@ bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { adj_list.push_back(new_w); } // Remove `u` - diag.remove_vertex(u); + bin.insert(u); success = true; } } + for (ZXVert u : bin) { + diag.remove_vertex(u); + } return success; } @@ -139,8 +146,8 @@ bool Rewrite::self_loop_removal_fun(ZXDiagram& diag) { success = true; } if ((n_pis % 2) == 1) { - const BasicGen& spid = diag.get_vertex_ZXGen(v); - ZXGen_ptr new_spid = std::make_shared( + const PhasedGen& spid = diag.get_vertex_ZXGen(v); + ZXGen_ptr new_spid = std::make_shared( vtype, spid.get_param() + 1., vqtype); diag.set_vertex_ZXGen_ptr(v, new_spid); } diff --git a/tket/src/ZX/ZXRWDecompositions.cpp b/tket/src/ZX/ZXRWDecompositions.cpp index 0c8eb209a5..75cece0eb0 100644 --- a/tket/src/ZX/ZXRWDecompositions.cpp +++ b/tket/src/ZX/ZXRWDecompositions.cpp @@ -79,8 +79,8 @@ Rewrite Rewrite::decompose_boxes() { return Rewrite(decompose_boxes_fun); } bool Rewrite::basic_wires_fun(ZXDiagram& diag) { ZXGen_ptr qhad = - std::make_shared(ZXType::Hbox, -1, QuantumType::Quantum); - ZXGen_ptr chad = std::make_shared( + std::make_shared(ZXType::Hbox, -1, QuantumType::Quantum); + ZXGen_ptr chad = std::make_shared( ZXType::Hbox, -1, QuantumType::Classical); WireVec targets; BGL_FORALL_EDGES(w, *diag.graph, ZXGraph) { diff --git a/tket/src/ZX/ZXRWGraphLikeForm.cpp b/tket/src/ZX/ZXRWGraphLikeForm.cpp index f45ad34758..c849d08ddc 100644 --- a/tket/src/ZX/ZXRWGraphLikeForm.cpp +++ b/tket/src/ZX/ZXRWGraphLikeForm.cpp @@ -39,7 +39,7 @@ bool Rewrite::separate_boundaries_fun(ZXDiagram& diag) { } // New wires will inherit `w`'s `qtype` QuantumType wq = diag.get_qtype(w); - ZXGen_ptr id = std::make_shared(ZXType::ZSpider, 0., wq); + ZXGen_ptr id = std::make_shared(ZXType::ZSpider, 0., wq); ZXVert z_at_b = 
diag.add_vertex(id); diag.add_wire(b, z_at_b, ZXWireType::Basic, wq); ZXVert z_at_o = diag.add_vertex(id); diff --git a/tket/src/ZX/ZXRWGraphLikeSimplification.cpp b/tket/src/ZX/ZXRWGraphLikeSimplification.cpp index cb93ca8370..d9f180c593 100644 --- a/tket/src/ZX/ZXRWGraphLikeSimplification.cpp +++ b/tket/src/ZX/ZXRWGraphLikeSimplification.cpp @@ -36,6 +36,7 @@ static bool can_complement_neighbourhood( } bool Rewrite::remove_interior_cliffords_fun(ZXDiagram& diag) { + if (!diag.is_graphlike()) return false; bool success = false; ZXVertSeqSet candidates; BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { candidates.insert(v); } @@ -45,7 +46,7 @@ bool Rewrite::remove_interior_cliffords_fun(ZXDiagram& diag) { ZXVert v = *it; view.erase(it); if (!diag.is_proper_clifford_spider(v)) continue; - const BasicGen& spid = diag.get_vertex_ZXGen(v); + const PhasedGen& spid = diag.get_vertex_ZXGen(v); QuantumType vqtype = *spid.get_qtype(); ZXVertVec neighbours = diag.neighbours(v); if (!can_complement_neighbourhood(diag, vqtype, neighbours)) continue; @@ -69,14 +70,14 @@ bool Rewrite::remove_interior_cliffords_fun(ZXDiagram& diag) { diag.add_wire(*xi, *yi, ZXWireType::H, vqtype); } } - const BasicGen& xi_op = diag.get_vertex_ZXGen(*xi); + const PhasedGen& xi_op = diag.get_vertex_ZXGen(*xi); // If `v` is Quantum, Classical neighbours will pick up both the +theta // and -theta phases, cancelling out if (vqtype == QuantumType::Quantum && *xi_op.get_qtype() == QuantumType::Classical) continue; // Update phase information - ZXGen_ptr xi_new_op = std::make_shared( + ZXGen_ptr xi_new_op = std::make_shared( ZXType::ZSpider, xi_op.get_param() - spid.get_param(), *xi_op.get_qtype()); diag.set_vertex_ZXGen_ptr(*xi, xi_new_op); @@ -96,8 +97,8 @@ Rewrite Rewrite::remove_interior_cliffords() { static void add_phase_to_vertices( ZXDiagram& diag, const ZXVertSeqSet& verts, const Expr& phase) { for (const ZXVert& v : verts) { - const BasicGen& old_spid = diag.get_vertex_ZXGen(v); - ZXGen_ptr new_spid = std::make_shared( + const PhasedGen& old_spid = diag.get_vertex_ZXGen(v); + ZXGen_ptr new_spid = std::make_shared( ZXType::ZSpider, old_spid.get_param() + phase, *old_spid.get_qtype()); diag.set_vertex_ZXGen_ptr(v, new_spid); } @@ -123,6 +124,7 @@ static void bipartite_complementation( } bool Rewrite::remove_interior_paulis_fun(ZXDiagram& diag) { + if (!diag.is_graphlike()) return false; bool success = false; ZXVertSeqSet candidates; // Need an indirect iterator as BGL_FORALL_VERTICES // breaks when removing the current vertex @@ -167,8 +169,8 @@ bool Rewrite::remove_interior_paulis_fun(ZXDiagram& diag) { } excl_u.erase(v); excl_v.erase(joint.begin(), joint.end()); - const BasicGen& v_spid = diag.get_vertex_ZXGen(v); - const BasicGen& u_spid = diag.get_vertex_ZXGen(u); + const PhasedGen& v_spid = diag.get_vertex_ZXGen(v); + const PhasedGen& u_spid = diag.get_vertex_ZXGen(u); add_phase_to_vertices( diag, joint, v_spid.get_param() + u_spid.get_param() + 1.); @@ -194,6 +196,7 @@ Rewrite Rewrite::remove_interior_paulis() { } bool Rewrite::extend_at_boundary_paulis_fun(ZXDiagram& diag) { + if (!diag.is_graphlike()) return false; bool success = false; for (const ZXVert& b : diag.get_boundary()) { // Valid ZX graph requires boundaries to have a unique neighbour @@ -220,7 +223,8 @@ bool Rewrite::extend_at_boundary_paulis_fun(ZXDiagram& diag) { // extend it ZXGen_ptr u_op = diag.get_vertex_ZXGen_ptr(u); QuantumType qtype = *u_op->get_qtype(); - ZXGen_ptr id = std::make_shared(ZXType::ZSpider, 0., qtype); + ZXGen_ptr id = + 
          std::make_shared<PhasedGen>(ZXType::ZSpider, 0., qtype);
   ZXVert z1 = diag.add_vertex(id);
   ZXVert z2 = diag.add_vertex(u_op);
   diag.add_wire(u, z1, ZXWireType::H, qtype);
diff --git a/tket/src/ZX/include/ZX/Flow.hpp b/tket/src/ZX/include/ZX/Flow.hpp
new file mode 100644
index 0000000000..d1baaf29eb
--- /dev/null
+++ b/tket/src/ZX/include/ZX/Flow.hpp
@@ -0,0 +1,98 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "Utils/BiMapHeaders.hpp"
+#include "ZX/ZXDiagram.hpp"
+
+namespace tket {
+
+namespace zx {
+
+/**
+ * Data structure for flow in qubit MBQC.
+ * Different classes of flow exist based on the types of measurements and
+ * correction sets accepted, but the contents of the flow are the same.
+ * Causal < XY gflow < 3Plane gflow < Pauli flow
+ *
+ * `c` defines the correction set for each measured vertex.
+ * `d` approximates the partial order by giving the depth of the measurement
+ * from the output, i.e. d(u) < d(v) => v is measured before u.
+ */
+class Flow {
+ public:
+  Flow(
+      const std::map<ZXVert, ZXVertSeqSet>& c,
+      const std::map<ZXVert, unsigned>& d);
+
+  // Returns the correction set for a given measured vertex (those vertices
+  // receiving an X correction)
+  // Will fail with a map.at error if v is not in the flow
+  ZXVertSeqSet c(const ZXVert& v) const;
+  // Returns the odd neighbourhood of the correction set for a given measured
+  // vertex (those vertices receiving a Z correction)
+  // Will fail with a map.at error if v is not in the flow
+  ZXVertSeqSet odd(const ZXVert& v, const ZXDiagram& diag) const;
+  // Returns the depth from the outputs in the ordering of the flow
+  // e.g. an output vertex will have depth 0, the last measured vertex has
+  // depth 1
+  unsigned d(const ZXVert& v) const;
+
+  // Verify that a flow is well-formed according to the Pauli flow conditions
+  // Throws a ZXError if any condition is violated
+  void verify(const ZXDiagram& diag) const;
+
+  // Focusses a flow according to Lemma B.5, Simmons "Relating Measurement
+  // Patterns to Circuits via Pauli Flow" https://arxiv.org/pdf/2109.05654.pdf
+  void focus(const ZXDiagram& diag);
+
+  // Attempts to identify a causal flow for a diagram
+  // Follows Algorithm 1 from Mhalla & Perdrix "Finding Optimal Flows
+  // Efficiently" https://arxiv.org/pdf/0709.2670.pdf
+  // O(n^2 log n) for n vertices
+  static Flow identify_causal_flow(const ZXDiagram& diag);
+  // Attempts to identify a Pauli flow for a diagram
+  // Follows Algorithm 1 from Simmons "Relating Measurement Patterns to
+  // Circuits via Pauli Flow" https://arxiv.org/pdf/2109.05654.pdf
+  // O(n^4) for n vertices
+  static Flow identify_pauli_flow(const ZXDiagram& diag);
+
+  // Attempts to identify focussed sets according to Lemma B.10, Simmons
+  // "Relating Measurement Patterns to Circuits via Pauli Flow"
+  // https://arxiv.org/pdf/2109.05654.pdf
+  static std::set<ZXVertSeqSet> identify_focussed_sets(const ZXDiagram& diag);
+
+ private:
+  // Correction sets
+  std::map<ZXVert, ZXVertSeqSet> c_;
+  // Approximate the partial order by recording the depth from outputs
+  std::map<ZXVert, unsigned> d_;
+
+  // Solve for corrections using Gaussian elimination and back substitution
+  // Used within identify_pauli_flow
+  // correctors are those vertices which may be included in the correction sets
+  // preserve are those vertices which may not be included in the odd
+  // neighbourhood (unless being corrected)
+  // to_solve are those vertices that are yet to find corrections
+  // ys are all vertices with ZXType::PY
+  // The maps convert between row/column indices in the matrix and vertices in
+  // the diagram
+  static std::map<ZXVert, ZXVertSeqSet> gauss_solve_correctors(
+      const ZXDiagram& diag, const boost::bimap<ZXVert, unsigned>& correctors,
+      const boost::bimap<ZXVert, unsigned>& preserve,
+      const ZXVertVec& to_solve, const boost::bimap<ZXVert, unsigned>& ys);
+};
+
+}  // namespace zx
+
+}  // namespace tket
diff --git a/tket/src/ZX/include/ZX/ZXDiagram.hpp b/tket/src/ZX/include/ZX/ZXDiagram.hpp
index 084d46cf2b..32a61ee13d 100644
--- a/tket/src/ZX/include/ZX/ZXDiagram.hpp
+++ b/tket/src/ZX/include/ZX/ZXDiagram.hpp
@@ -20,9 +20,10 @@ namespace tket {
 
 namespace zx {
 
-// Forward declare Rewrite, ZXDiagramPybind for friend access
+// Forward declare Rewrite, ZXDiagramPybind, Flow for friend access
 class Rewrite;
 class ZXDiagramPybind;
+class Flow;
 
 class ZXDiagram {
  private:
@@ -166,6 +167,14 @@ class ZXDiagram {
   // Whether the diagram contains any symbolic parameters
   bool is_symbolic() const;
 
+  // Whether the diagram is graphlike (ZSpiders and H edges, Basics to
+  // boundaries)
+  bool is_graphlike() const;
+
+  // Whether the diagram is MBQC (MBQC, Inputs, and Outputs, Basic from Input,
+  // H otherwise)
+  bool is_MBQC() const;
+
   /**
    * Produces graphviz string, applying `highlights` to some vertices.
* Inputs: @@ -293,6 +302,7 @@ class ZXDiagram { friend Rewrite; friend ZXDiagramPybind; + friend Flow; private: /** diff --git a/tket/src/ZX/include/ZX/ZXGenerator.hpp b/tket/src/ZX/include/ZX/ZXGenerator.hpp index 28e300c036..154bacf091 100644 --- a/tket/src/ZX/include/ZX/ZXGenerator.hpp +++ b/tket/src/ZX/include/ZX/ZXGenerator.hpp @@ -41,6 +41,7 @@ enum class ZXType { * Symmetric generators */ // Z (green) spider + // Equivalently, a (postselected) XY qubit (with negative phase) in MBQC ZSpider, // X (red) spider @@ -49,6 +50,35 @@ enum class ZXType { // Hbox Hbox, + // A (postselected) XY qubit in MBQC + // Corresponds to a Z spider with negative phase + XY, + + // A (postselected) XZ qubit in MBQC + // Corresponds to a 0.5-phase (n+1)-ary Z spider connected to a phaseful 1-ary + // X spider + XZ, + + // A (postselected) YZ qubit in MBQC + // Corresponds to a 0-phase (n+1)-ary Z spider connected to a phaseful 1-ary X + // spider + YZ, + + // A (postselected) Pauli X qubit in MBQC + // Corresponds to a Z spider with phase either 0 (param=False) or 1 + // (param=True) + PX, + + // A (postselected) Pauli Y qubit in MBQC + // Corresponds to a Z spider with phase either -0.5 (param=False) or +0.5 + // (param=True) + PY, + + // A (postselected) Pauli Z qubit in MBQC + // Corresponds to a 0-phase (n+1)-ary Z spider connected to a 1-ary X spider + // with phase either 0 (param=False) or 1 (param=True) + PZ, + /** * Directed (non-commutative) generators */ @@ -71,6 +101,9 @@ bool is_boundary_type(ZXType type); bool is_basic_gen_type(ZXType type); bool is_spider_type(ZXType type); bool is_directed_type(ZXType type); +bool is_MBQC_type(ZXType type); +bool is_phase_type(ZXType type); +bool is_Clifford_gen_type(ZXType type); // Forward declaration so we can use ZXGen_ptr in the interface of ZXGen class ZXGen; @@ -140,6 +173,8 @@ class ZXGen { ZXType type, QuantumType qtype = QuantumType::Quantum); static ZXGen_ptr create_gen( ZXType type, const Expr& param, QuantumType qtype = QuantumType::Quantum); + static ZXGen_ptr create_gen( + ZXType type, bool param, QuantumType qtype = QuantumType::Quantum); protected: ZXGen(ZXType type); @@ -172,23 +207,42 @@ class BoundaryGen : public ZXGen { }; /** - * Implementation of ZXGen for undirected (commutative) generators. + * Virtual subclass of ZXGen for undirected (commutative) generators. * `std::nullopt` is used for ports as there is no need to distinguish. * If the generator is Quantum, all adjacent wires must also be Quantum. * If the generator is Classical, adjacent wires can be either Quantum or - * Classical. Each known generator only uses a single parameter. + * Classical. + * Implementations include PhasedGen for generators with 1 Expr parameter or + * CliffordGen for Clifford generators with 1 bool parameter. */ class BasicGen : public ZXGen { public: - BasicGen( - ZXType type, const Expr& param, QuantumType qtype = QuantumType::Quantum); - - Expr get_param() const; + BasicGen(ZXType type, QuantumType qtype = QuantumType::Quantum); // Overrides from ZXGen virtual std::optional get_qtype() const override; virtual bool valid_edge( std::optional port, QuantumType qtype) const override; + virtual bool operator==(const ZXGen& other) const override; + + protected: + const QuantumType qtype_; +}; + +/** + * Implementation of BasicGen for phased generators, e.g. spiders, Hbox. 
+ * Each generator has a single Expr parameter which is: + * - A complex number for Hbox + * - A real-valued phase in half-turns otherwise + */ +class PhasedGen : public BasicGen { + public: + PhasedGen( + ZXType type, const Expr& param, QuantumType qtype = QuantumType::Quantum); + + Expr get_param() const; + + // Overrides from ZXGen virtual SymSet free_symbols() const override; virtual ZXGen_ptr symbol_substitution( const SymEngine::map_basic_basic& sub_map) const override; @@ -196,10 +250,32 @@ class BasicGen : public ZXGen { virtual bool operator==(const ZXGen& other) const override; protected: - const QuantumType qtype_; const Expr param_; }; +/** + * Implementation of BasicGen for Clifford generators. + * The basis is determined by the ZX type, and the boolean parameter determines + * the discrete phase (false = 0 versus true = 1 half-turn). + */ +class CliffordGen : public BasicGen { + public: + CliffordGen( + ZXType type, bool param, QuantumType qtype = QuantumType::Quantum); + + bool get_param() const; + + // Overrides from ZXGen + virtual SymSet free_symbols() const override; + virtual ZXGen_ptr symbol_substitution( + const SymEngine::map_basic_basic& sub_map) const override; + virtual std::string get_name(bool latex = false) const override; + virtual bool operator==(const ZXGen& other) const override; + + protected: + const bool param_; +}; + /** * Virtual subclass of ZXGen for directed (non-commutative) generators. * The generator has a pre-determined number of ports labelled from 0 to diff --git a/tket/tests/CMakeLists.txt b/tket/tests/CMakeLists.txt index ac5960be31..84b3c17691 100644 --- a/tket/tests/CMakeLists.txt +++ b/tket/tests/CMakeLists.txt @@ -44,9 +44,10 @@ endif() set(TKET_TESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +include(tkettestutilsfiles.cmake) include(tkettestsfiles.cmake) -add_executable(test_tket ${TEST_SOURCES}) +add_executable(test_tket ${TESTUTILS_SOURCES} ${TEST_SOURCES}) target_link_libraries(test_tket PRIVATE tket-ArchAwareSynth diff --git a/tket/tests/Circuit/test_Circ.cpp b/tket/tests/Circuit/test_Circ.cpp index d60b463a98..af5c647588 100644 --- a/tket/tests/Circuit/test_Circ.cpp +++ b/tket/tests/Circuit/test_Circ.cpp @@ -2609,11 +2609,17 @@ SCENARIO("Named operation groups") { Op_ptr x_op = get_op_ptr(OpType::X); REQUIRE(c.substitute_named(x_op, "group1")); + std::unordered_set opgroups({"group1", "group2"}); + REQUIRE(c.get_opgroups() == opgroups); + Circuit c2(2); c2.add_op(OpType::T, {0}); c2.add_op(OpType::CRx, 0.1, {0, 1}, "group2a"); REQUIRE(c.substitute_named(c2, "group2")); + std::unordered_set opgroups2({"group1", "group2a"}); + REQUIRE(c.get_opgroups() == opgroups2); + REQUIRE(c.count_gates(OpType::H) == 1); REQUIRE(c.count_gates(OpType::S) == 0); REQUIRE(c.count_gates(OpType::X) == 3); @@ -2647,6 +2653,8 @@ SCENARIO("Named operation groups") { Circuit c1 = c; REQUIRE(c == c1); + REQUIRE(c.get_opgroups() == opgroups2); + REQUIRE(c1.get_opgroups() == opgroups2); } GIVEN("Negative tests for operation groups") { Circuit c(2); diff --git a/tket/tests/Placement/test_NeighbourPlacements.cpp b/tket/tests/Placement/test_NeighbourPlacements.cpp new file mode 100644 index 0000000000..a167c6ea35 --- /dev/null +++ b/tket/tests/Placement/test_NeighbourPlacements.cpp @@ -0,0 +1,147 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "../testutil.hpp" + +namespace tket { +namespace test_NeighbourPlacements { + +using Connection = Architecture::Connection; + +SCENARIO("class NeighbourPlacments") { + GIVEN("a realistic-ish instance") { + Architecture arc( + {{Node(4), Node(5)}, + {Node(5), Node(6)}, + {Node(6), Node(7)}, + {Node(5), Node(7)}}); + qubit_mapping_t map( + {{Qubit(0), Node(4)}, + {Qubit(1), Node(5)}, + {Qubit(2), Node(6)}, + {Qubit(3), Node(7)}}); + NeighbourPlacements np(arc, map); + + WHEN("Getting a placement dist=0") { + auto res = np.get(0, 1); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("The resulting map is identical") { + for (auto [k, v] : map) { + REQUIRE(new_map.contains(k)); + REQUIRE(new_map[k] == v); + } + } + } + + WHEN("Getting a placement dist=2, optimise=true") { + auto res = np.get(2, 1); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("The results are valid") { + REQUIRE(new_map.size() == 4); + REQUIRE(swaps.size() == 2); + for (unsigned i = 0; i < 4; ++i) { + REQUIRE(new_map.contains(Qubit(i))); + } + } + THEN("The resulting map is correct") { + REQUIRE(new_map[Qubit(0)] == Node(4)); + REQUIRE(new_map[Qubit(1)] == Node(7)); + REQUIRE(new_map[Qubit(2)] == Node(5)); + REQUIRE(new_map[Qubit(3)] == Node(6)); + } + THEN("The swaps are correct") { + REQUIRE(swaps[0] == std::pair{Node(5), Node(7)}); + REQUIRE(swaps[1] == std::pair{Node(5), Node(6)}); + } + } + WHEN("Getting 10 placement dist=3, optimise=true") { + auto res = np.get(3, 10); + THEN("There are 10 resulting placements") { REQUIRE(res.size() == 10); } + } + } + GIVEN("the simplest possible instance") { + Architecture arc(std::vector>{{Node(0), Node(1)}}); + qubit_mapping_t map({{Qubit(0), Node(0)}, {Qubit(1), Node(1)}}); + NeighbourPlacements np(arc, map); + WHEN("Getting a placement dist=2, optimise=false") { + auto res = np.get(2, 1, false); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("Both swaps are identical") { + REQUIRE(swaps.size() == 2); + REQUIRE(swaps[0] == swaps[1]); + } + } + WHEN("Getting a placement dist=2, optimise=true") { + THEN("Con only find a solution with dist=1") { + auto res = np.get(2, 1, true); + REQUIRE(res.size() == 1); + REQUIRE(res.front().swaps.size() == 1); + } + } + WHEN("Getting two placements of dist=1") { + THEN("Can only find one result") { + REQUIRE(np.get(1, 2, false, 100).size() == 1); + } + } + } + GIVEN("an instance with unlucky seed") { + Architecture arc({{Node(0), Node(1)}, {Node(1), Node(2)}}); + qubit_mapping_t map( + {{Qubit(0), Node(0)}, {Qubit(1), Node(1)}, {Qubit(2), Node(2)}}); + NeighbourPlacements np(arc, map); + + // find unlucky seed + unsigned seed; + for (seed = 0; seed < 10; ++seed) { + auto res = np.get(2, 1, false, seed); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + REQUIRE(swaps.size() == 2); + if (swaps[0] == swaps[1]) { + break; + } + } + 
THEN("There is an unlucky seed") { REQUIRE(seed < 10u); } + + WHEN("Getting a placement dist=2, optimise=false and fixed seed") { + auto res = np.get(2, 1, false, seed); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("Both swaps are identical") { + REQUIRE(swaps.size() == 2); + REQUIRE(swaps[0] == swaps[1]); + } + } + WHEN("Getting a placement dist=2, optimise=true and fixed seed") { + auto res = np.get(2, 1, true, seed); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("Both swaps are now different") { + REQUIRE(swaps.size() == 2); + REQUIRE(swaps[0] != swaps[1]); + } + } + } +} + +} // namespace test_NeighbourPlacements +} // namespace tket diff --git a/tket/tests/test_Placement.cpp b/tket/tests/Placement/test_Placement.cpp similarity index 91% rename from tket/tests/test_Placement.cpp rename to tket/tests/Placement/test_Placement.cpp index b03036e832..2167f8db92 100644 --- a/tket/tests/test_Placement.cpp +++ b/tket/tests/Placement/test_Placement.cpp @@ -15,8 +15,8 @@ #include #include +#include "../testutil.hpp" #include "Placement/Placement.hpp" -#include "testutil.hpp" namespace tket { namespace test_Placement { @@ -434,16 +434,6 @@ SCENARIO("Check Monomorpher satisfies correct placement conditions") { } Monomorpher morph(test_circ, arc, {}, {10, arc.n_connections()}); - /*std::vector results = morph.place(1); - THEN("The circuit is placed in the highly connected region.") { - std::set middle_nodes = {5, 6, 9, 10}; - for (auto map : results) { - for (auto mapping : map.map) { - REQUIRE(middle_nodes.find(arc.map_node( - mapping.second)) != middle_nodes.end()); - } - } - }*/ } } } @@ -497,9 +487,68 @@ SCENARIO( REQUIRE(potential_maps.size() > 0); } } +SCENARIO("Test NaivePlacement class") { + Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}}); + GIVEN( + "No Qubits placed in Circuit, same number of qubits and architecture " + "nodes.") { + Circuit test_circ(7); + NaivePlacement np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + REQUIRE(p[Qubit(0)] == Node(0)); + REQUIRE(p[Qubit(1)] == Node(1)); + REQUIRE(p[Qubit(2)] == Node(2)); + REQUIRE(p[Qubit(3)] == Node(3)); + REQUIRE(p[Qubit(4)] == Node(4)); + REQUIRE(p[Qubit(5)] == Node(5)); + REQUIRE(p[Qubit(6)] == Node(6)); + } + GIVEN("No Qubits placed in Circuit, less qubits than architecture nodes.") { + Circuit test_circ(6); + NaivePlacement np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + REQUIRE(p[Qubit(0)] == Node(0)); + REQUIRE(p[Qubit(1)] == Node(1)); + REQUIRE(p[Qubit(2)] == Node(2)); + REQUIRE(p[Qubit(3)] == Node(3)); + REQUIRE(p[Qubit(4)] == Node(4)); + REQUIRE(p[Qubit(5)] == Node(5)); + } + GIVEN( + "Some Qubits placed in Circuit, same number of qubits and architecture " + "nodes.") { + Circuit test_circ(4); + test_circ.add_qubit(Node(0)); + test_circ.add_qubit(Node(1)); + test_circ.add_qubit(Node(2)); + NaivePlacement np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + + REQUIRE(p[Qubit(0)] == Node(3)); + REQUIRE(p[Qubit(1)] == Node(4)); + REQUIRE(p[Qubit(2)] == Node(5)); + REQUIRE(p[Qubit(3)] == Node(6)); + REQUIRE(p[Node(0)] == Node(0)); + REQUIRE(p[Node(1)] == Node(1)); + REQUIRE(p[Node(2)] == Node(2)); + } + GIVEN("Some Qubits placed in Circuit, less qubits than architecture nodes.") { + Circuit test_circ(2); + test_circ.add_qubit(Node(0)); + test_circ.add_qubit(Node(1)); + test_circ.add_qubit(Node(2)); + NaivePlacement 
np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + + REQUIRE(p[Qubit(0)] == Node(3)); + REQUIRE(p[Qubit(1)] == Node(4)); + REQUIRE(p[Node(0)] == Node(0)); + REQUIRE(p[Node(1)] == Node(1)); + REQUIRE(p[Node(2)] == Node(2)); + } +} // Tests for new placement method wrappers - SCENARIO( "Does the base Placement class correctly modify Circuits and return " "maps?") { diff --git a/tket/tests/ZX/test_Flow.cpp b/tket/tests/ZX/test_Flow.cpp new file mode 100644 index 0000000000..18ae19bb87 --- /dev/null +++ b/tket/tests/ZX/test_Flow.cpp @@ -0,0 +1,337 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "ZX/Flow.hpp" + +namespace tket { + +namespace zx { + +namespace test_flow { + +SCENARIO("Testing flow verification") { + // Diagram combines Ex. 2.43, "There and back again: a circuit extraction + // tale", Backens et al. 2021 and Ex. C.13, "Relating measurement patterns to + // circuits via Pauli flow", Simmons 2021 + ZXDiagram diag(1, 3, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Gflow example from Backens et al. + ZXVert ga = diag.add_vertex(ZXType::XY, 0.3); + ZXVert gb = diag.add_vertex(ZXType::XY, 0.7); + ZXVert gc = diag.add_vertex(ZXType::XZ, 1.4); + ZXVert gd = diag.add_vertex(ZXType::YZ, 0.9); + diag.add_wire(ins.at(0), ga); + diag.add_wire(ga, gb, ZXWireType::H); + diag.add_wire(gb, gc, ZXWireType::H); + diag.add_wire(gb, gd, ZXWireType::H); + diag.add_wire(gc, gd, ZXWireType::H); + diag.add_wire(gb, outs.at(0), ZXWireType::H); + // Pauli flow example from Simmons (angles cut to Paulis) + ZXVert pi = diag.add_vertex(ZXType::XY, 0.9); + ZXVert pa = diag.add_vertex(ZXType::PZ); + ZXVert pb = diag.add_vertex(ZXType::PX); + ZXVert pc = diag.add_vertex(ZXType::XY, 0.2); + ZXVert pd = diag.add_vertex(ZXGen::create_gen(ZXType::PY, true)); + diag.add_wire(gc, pi, ZXWireType::H); + diag.add_wire(pi, pb, ZXWireType::H); + diag.add_wire(pa, pb, ZXWireType::H); + diag.add_wire(pa, pc, ZXWireType::H); + diag.add_wire(pa, pd, ZXWireType::H); + diag.add_wire(pb, pd, ZXWireType::H); + diag.add_wire(pc, pd, ZXWireType::H); + diag.add_wire(pc, outs.at(1), ZXWireType::H); + diag.add_wire(pd, outs.at(2), ZXWireType::H); + + // Give a valid Pauli flow + std::map c{ + {ga, {gb}}, // Odd = {ga, gc, gd, outs[0]} + {gb, {gc}}, // Odd = {gb, gc, pi} + {gc, {gc, gd}}, // Odd = {gc, gd, pi} + {gd, {gd, outs.at(0), pi}}, // Odd = {pb} + {pi, {pb, outs.at(2)}}, // Odd = {pi, pa} + {pa, {pa, pc, pd, outs.at(2)}}, // Odd = {pd, outs[1], outs[2]} + {pb, {pc, pd, outs.at(1)}}, // Odd = {pb, pd, outs[1], outs[2]} + {pc, {outs.at(1)}}, // Odd = {pc} + {pd, {outs.at(2)}}, // Odd = {pd} + }; + std::map d{ + {ga, 7}, {gb, 6}, {gc, 5}, {gd, 4}, + {pi, 3}, {pa, 2}, {pb, 2}, {pc, 1}, + {pd, 1}, {outs.at(0), 0}, {outs.at(1), 0}, {outs.at(2), 0}, + }; + + Flow fl{c, d}; + REQUIRE_NOTHROW(fl.verify(diag)); + + // Check for ordering of corrections + d.at(ga) 
= 4; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A qubit has an X correction in its past"); + d.at(gb) = 3; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A qubit has a Z correction in its past"); + // Revert to valid flow + d.at(ga) = 7; + d.at(gb) = 6; + + // Check history Y measurements have Y corrections + diag.set_vertex_ZXGen_ptr(pb, ZXGen::create_gen(ZXType::PY)); + c.at(pa) = {pa}; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A past Y vertex receives a Z correction"); + c.at(pa) = {pa, pc, pd}; + d.at(pd) = 2; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A past Y vertex receives an X correction"); + // Revert to valid flow + diag.set_vertex_ZXGen_ptr(pb, ZXGen::create_gen(ZXType::PX)); + c.at(pa) = {pa, pc, pd, outs.at(2)}; + d.at(pd) = 1; + + // Check all basis corrections are ok + // Correct XY with I, X, Y + std::vector cs{ + {}, {pc, outs.at(2)}, {pc, outs.at(1), outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "XY vertex must be corrected with a Z"); + } + c.at(pc) = {outs.at(1)}; + // Correct XZ with I, X, Z + cs = {{}, {gc, outs.at(0)}, {pi}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(gc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "XZ vertex must be corrected with a Y"); + } + c.at(gc) = {gc, gd}; + // Correct YZ with I, Y, Z + diag.set_vertex_ZXGen_ptr(pa, ZXGen::create_gen(ZXType::YZ, Expr(1.2))); + cs = {{}, {pa, pd}, {pc}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pa) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "YZ vertex must be corrected with an X"); + } + diag.set_vertex_ZXGen_ptr(pa, ZXGen::create_gen(ZXType::PZ)); + c.at(pa) = {pa, pc, pd, outs.at(2)}; + // Correct PX with I, X + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::PX)); + cs = {{}, {pc, outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "PX vertex must be corrected with a Y or Z"); + } + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::XY, Expr(0.2))); + c.at(pc) = {outs.at(1)}; + // Correct PY with I, Y + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::PY)); + cs = {{}, {pc, outs.at(1), outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "PY vertex must be corrected with an X or Z"); + } + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::XY, Expr(0.2))); + c.at(pc) = {outs.at(1)}; + // Correct PZ with I, Z + cs = {{}, {pc, outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pa) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "PZ vertex must be corrected with an X or Y"); + } +} + +SCENARIO("Testing causal flow identification and focussing") { + // Diagram based on Fig. 
8, "Determinism in the one-way model", + // Danos & Kashefi 2006 + ZXDiagram diag(2, 2, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Input measurements + ZXVert i0 = diag.add_vertex(ZXType::XY, 0.3); + ZXVert i1 = diag.add_vertex(ZXType::XY, 0.7); + diag.add_wire(ins.at(0), i0); + diag.add_wire(ins.at(1), i1); + // Chain on qubit 0 + ZXVert v0 = diag.add_vertex(ZXType::XY, 1.4); + diag.add_wire(i0, v0, ZXWireType::H); + diag.add_wire(v0, outs.at(0), ZXWireType::H); + // Chain on qubit 1 + ZXVert v1a = diag.add_vertex(ZXType::XY, 0.9); + ZXVert v1b = diag.add_vertex(ZXType::XY, 0.2); + ZXVert v1c = diag.add_vertex(ZXType::XY, 1.2); + ZXVert v1d = diag.add_vertex(ZXType::XY, 1.6); + ZXVert v1e = diag.add_vertex(ZXType::XY, 0.4); + diag.add_wire(i1, v1a, ZXWireType::H); + diag.add_wire(v1a, v1b, ZXWireType::H); + diag.add_wire(v1b, v1c, ZXWireType::H); + diag.add_wire(v1c, v1d, ZXWireType::H); + diag.add_wire(v1d, v1e, ZXWireType::H); + diag.add_wire(v1e, outs.at(1), ZXWireType::H); + // Cross-chain links + diag.add_wire(i0, v1a, ZXWireType::H); + diag.add_wire(i0, v1d, ZXWireType::H); + + Flow f = Flow::identify_causal_flow(diag); + + CHECK(f.c(i0) == ZXVertSeqSet{v0}); + CHECK(f.c(v0) == ZXVertSeqSet{outs.at(0)}); + CHECK(f.c(i1) == ZXVertSeqSet{v1a}); + CHECK(f.c(v1a) == ZXVertSeqSet{v1b}); + CHECK(f.c(v1b) == ZXVertSeqSet{v1c}); + CHECK(f.c(v1c) == ZXVertSeqSet{v1d}); + CHECK(f.c(v1d) == ZXVertSeqSet{v1e}); + CHECK(f.c(v1e) == ZXVertSeqSet{outs.at(1)}); + REQUIRE_NOTHROW(f.verify(diag)); + + REQUIRE_NOTHROW(f.focus(diag)); + CHECK(f.c(i0) == ZXVertSeqSet{v0}); + CHECK(f.c(v0) == ZXVertSeqSet{outs.at(0)}); + CHECK(f.c(i1) == ZXVertSeqSet{v1a, v0, v1c, v1e}); + CHECK(f.c(v1a) == ZXVertSeqSet{v1b, v1d, v0, outs.at(1)}); + CHECK(f.c(v1b) == ZXVertSeqSet{v1c, v1e}); + CHECK(f.c(v1c) == ZXVertSeqSet{v1d, v0, outs.at(1)}); + CHECK(f.c(v1d) == ZXVertSeqSet{v1e}); + CHECK(f.c(v1e) == ZXVertSeqSet{outs.at(1)}); + REQUIRE_NOTHROW(f.verify(diag)); +} + +SCENARIO("Testing Pauli flow identification and focussing") { + // Diagram combines Ex. 2.43, "There and back again: a circuit extraction + // tale", Backens et al. 2021 and Ex. C.13, "Relating measurement patterns to + // circuits via Pauli flow", Simmons 2021 + ZXDiagram diag(1, 3, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Gflow example from Backens et al. 
+ ZXVert ga = diag.add_vertex(ZXType::XY, 0.3); + ZXVert gb = diag.add_vertex(ZXType::XY, 0.7); + ZXVert gc = diag.add_vertex(ZXType::XZ, 1.4); + ZXVert gd = diag.add_vertex(ZXType::YZ, 0.9); + diag.add_wire(ins.at(0), ga); + diag.add_wire(ga, gb, ZXWireType::H); + diag.add_wire(gb, gc, ZXWireType::H); + diag.add_wire(gb, gd, ZXWireType::H); + diag.add_wire(gc, gd, ZXWireType::H); + diag.add_wire(gb, outs.at(0), ZXWireType::H); + // Pauli flow example from Simmons (angles cut to Paulis) + ZXVert pi = diag.add_vertex(ZXType::XY, 0.9); + ZXVert pa = diag.add_vertex(ZXType::PZ); + ZXVert pb = diag.add_vertex(ZXType::PX); + ZXVert pc = diag.add_vertex(ZXType::XY, 0.2); + ZXVert pd = diag.add_vertex(ZXGen::create_gen(ZXType::PY, true)); + diag.add_wire(gc, pi, ZXWireType::H); + diag.add_wire(pi, pb, ZXWireType::H); + diag.add_wire(pa, pb, ZXWireType::H); + diag.add_wire(pa, pc, ZXWireType::H); + diag.add_wire(pa, pd, ZXWireType::H); + diag.add_wire(pb, pd, ZXWireType::H); + diag.add_wire(pc, pd, ZXWireType::H); + diag.add_wire(pc, outs.at(1), ZXWireType::H); + diag.add_wire(pd, outs.at(2), ZXWireType::H); + + Flow f = Flow::identify_pauli_flow(diag); + + REQUIRE_NOTHROW(f.verify(diag)); + REQUIRE_NOTHROW(f.focus(diag)); + REQUIRE_NOTHROW(f.verify(diag)); +} + +SCENARIO("Test focussed set identificaiton") { + // Diagram combines Ex. 2.43, "There and back again: a circuit extraction + // tale", Backens et al. 2021 and Ex. C.13, "Relating measurement patterns to + // circuits via Pauli flow", Simmons 2021 + ZXDiagram diag(1, 3, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Gflow example from Backens et al. + ZXVert ga = diag.add_vertex(ZXType::XY, 0.3); + ZXVert gb = diag.add_vertex(ZXType::XY, 0.7); + ZXVert gc = diag.add_vertex(ZXType::XZ, 1.4); + ZXVert gd = diag.add_vertex(ZXType::YZ, 0.9); + diag.add_wire(ins.at(0), ga); + diag.add_wire(ga, gb, ZXWireType::H); + diag.add_wire(gb, gc, ZXWireType::H); + diag.add_wire(gb, gd, ZXWireType::H); + diag.add_wire(gc, gd, ZXWireType::H); + diag.add_wire(gb, outs.at(0), ZXWireType::H); + // Pauli flow example from Simmons (angles cut to Paulis) + ZXVert pi = diag.add_vertex(ZXType::XY, 0.9); + ZXVert pa = diag.add_vertex(ZXType::PZ); + ZXVert pb = diag.add_vertex(ZXType::PX); + ZXVert pc = diag.add_vertex(ZXType::XY, 0.2); + ZXVert pd = diag.add_vertex(ZXGen::create_gen(ZXType::PY, true)); + diag.add_wire(gc, pi, ZXWireType::H); + diag.add_wire(pi, pb, ZXWireType::H); + diag.add_wire(pa, pb, ZXWireType::H); + diag.add_wire(pa, pc, ZXWireType::H); + diag.add_wire(pa, pd, ZXWireType::H); + diag.add_wire(pb, pd, ZXWireType::H); + diag.add_wire(pc, pd, ZXWireType::H); + diag.add_wire(pc, outs.at(1), ZXWireType::H); + diag.add_wire(pd, outs.at(2), ZXWireType::H); + + std::set focussed = Flow::identify_focussed_sets(diag); + + REQUIRE(focussed.size() == 2); + for (const ZXVertSeqSet& fset : focussed) { + std::map parities; + for (const ZXVert& v : fset.get()) { + ZXType vtype = diag.get_zxtype(v); + REQUIRE( + (vtype == ZXType::Output || vtype == ZXType::XY || + vtype == ZXType::PX || vtype == ZXType::PY)); + for (const ZXVert& n : fset.get()) { + auto inserted = parities.insert({n, 1}); + if (!inserted.second) { + ++(inserted.first->second); + } + } + } + for (const std::pair& p : parities) { + if (p.second % 2 == 1) { + ZXType vtype = diag.get_zxtype(p.first); + REQUIRE(( + vtype == ZXType::Output || vtype == ZXType::XZ || + vtype == ZXType::YZ || vtype == ZXType::PY || vtype == 
ZXType::PZ)); + REQUIRE( + (vtype != ZXType::PY || + fset.get().find(p.first) != fset.get().end())); + } + } + } +} + +} // namespace test_flow + +} // namespace zx + +} // namespace tket diff --git a/tket/tests/ZX/test_ZXDiagram.cpp b/tket/tests/ZX/test_ZXDiagram.cpp index 1d10149b7c..37a15f4c7e 100644 --- a/tket/tests/ZX/test_ZXDiagram.cpp +++ b/tket/tests/ZX/test_ZXDiagram.cpp @@ -31,7 +31,7 @@ SCENARIO("Testing generator creation") { CHECK_FALSE(input.valid_edge(0, QuantumType::Quantum)); CHECK_FALSE(input.valid_edge(std::nullopt, QuantumType::Classical)); - BasicGen zSpider(ZXType::ZSpider, 0.3, QuantumType::Classical); + PhasedGen zSpider(ZXType::ZSpider, 0.3, QuantumType::Classical); CHECK(zSpider.get_name() == "C-Z(0.3)"); CHECK(zSpider.get_type() == ZXType::ZSpider); CHECK(zSpider.get_qtype() == QuantumType::Classical); @@ -40,7 +40,7 @@ SCENARIO("Testing generator creation") { CHECK(zSpider.valid_edge(std::nullopt, QuantumType::Classical)); CHECK_FALSE(zSpider.valid_edge(0, QuantumType::Quantum)); - BasicGen xSpider(ZXType::XSpider, Expr("2*a"), QuantumType::Quantum); + PhasedGen xSpider(ZXType::XSpider, Expr("2*a"), QuantumType::Quantum); CHECK(xSpider.get_name() == "Q-X(2*a)"); CHECK(xSpider.get_type() == ZXType::XSpider); CHECK(xSpider.get_qtype() == QuantumType::Quantum); @@ -51,9 +51,20 @@ SCENARIO("Testing generator creation") { Sym a = SymEngine::symbol("a"); sub_map[a] = Expr(0.8); CHECK(xSpider.symbol_substitution(sub_map)->get_name() == "Q-X(1.6)"); + CHECK( + *xSpider.symbol_substitution(sub_map) == + PhasedGen(ZXType::XSpider, 1.6, QuantumType::Quantum)); + + CliffordGen px(ZXType::PX, true, QuantumType::Classical); + CHECK(px.get_name() == "C-X(1)"); + CHECK(px.get_type() == ZXType::PX); + CHECK(px.get_param() == true); + CHECK(px.free_symbols().empty()); + CHECK(!(px == CliffordGen(ZXType::PX, false, QuantumType::Quantum))); + CHECK(px == CliffordGen(ZXType::PX, true, QuantumType::Classical)); // Should throw an error: type Triangle is not a BasicGen type - REQUIRE_THROWS_AS(BasicGen(ZXType::Triangle, 0.3), ZXError); + REQUIRE_THROWS_AS(PhasedGen(ZXType::Triangle, 0.3), ZXError); DirectedGen tri(ZXType::Triangle, QuantumType::Classical); CHECK(tri.get_name() == "C-Tri"); diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp index 41577f9034..a9459cae8d 100644 --- a/tket/tests/test_BoxDecompRoutingMethod.cpp +++ b/tket/tests/test_BoxDecompRoutingMethod.cpp @@ -1,3 +1,16 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
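Illustrative aside, not part of the patch: the routing test updates below follow an interface change in which RoutingMethod::check_method is folded into routing_method, which now reports applicability and the boundary relabelling together as a pair. A minimal sketch of the call pattern used in these tests, assuming circ and shared_arc are set up as in the surrounding scenarios:

    std::shared_ptr<MappingFrontier> mf = std::make_shared<MappingFrontier>(circ);
    LexiRouteRoutingMethod lrrm(100);
    std::pair<bool, unit_map_t> res = lrrm.routing_method(mf, shared_arc);
    if (res.first) {
      // res.second holds the logical-to-physical relabelling at the boundary
    }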
#include #include "Mapping/BoxDecomposition.hpp" diff --git a/tket/tests/test_CompilerPass.cpp b/tket/tests/test_CompilerPass.cpp index 80311434aa..875fed5251 100644 --- a/tket/tests/test_CompilerPass.cpp +++ b/tket/tests/test_CompilerPass.cpp @@ -236,12 +236,13 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { } } GIVEN("Synthesise Passes in a row then routing") { - Circuit circ(4); + Circuit circ(5); circ.add_op(OpType::H, {0}); circ.add_op(OpType::CZ, {0, 1}); circ.add_op(OpType::CH, {0, 2}); circ.add_op(OpType::CnX, {0, 1, 2, 3}); circ.add_op(OpType::CZ, {0, 1}); + circ.add_op(OpType::X, {4}); OpTypeSet ots = {OpType::CX, OpType::TK1, OpType::SWAP}; PredicatePtr gsp = std::make_shared(ots); SquareGrid grid(2, 3); diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index fb00d7294d..006cf65ef3 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -415,7 +415,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { } GIVEN( "Labelling is required, but there are no free remaining qubits, for" - "one updated label, order 0.") { + " one updated label, order 0.") { Circuit circ(9); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -432,6 +432,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { std::shared_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } GIVEN( @@ -490,14 +491,14 @@ SCENARIO("Test LexiLabellingMethod") { {nodes[2], nodes[3]}, {nodes[3], nodes[4]}}); ArchitecturePtr shared_arc = std::make_shared(architecture); - GIVEN("No qubit to label, empty frontier, check_method.") { + GIVEN("No qubit to label, empty frontier, routing_method false.") { Circuit circ(5); std::shared_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); + REQUIRE(!lrm.routing_method(mf, shared_arc).first); } - GIVEN("No qubit to label, partially filled frontier, check_method.") { + GIVEN("No qubit to label, partially filled frontier, routing_method false.") { Circuit circ(5); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[0], qubits[4]}); @@ -513,9 +514,9 @@ SCENARIO("Test LexiLabellingMethod") { std::shared_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); + REQUIRE(!lrm.routing_method(mf, shared_arc).first); } - GIVEN("Qubit to label, but casually restricted, check_method.") { + GIVEN("Qubit to label, but casually restricted, routing_method false.") { Circuit circ(5); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[0], qubits[4]}); @@ -530,11 +531,11 @@ SCENARIO("Test LexiLabellingMethod") { std::shared_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); + REQUIRE(!lrm.routing_method(mf, shared_arc).first); } GIVEN( "Two Qubit to label in future slice, causally restricted, " - "check_method.") { + "routing_method false.") { Circuit circ(5); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[0], qubits[1]}); @@ -547,9 +548,9 @@ SCENARIO("Test LexiLabellingMethod") { std::shared_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); + REQUIRE(!lrm.routing_method(mf, shared_arc).first); } - GIVEN("Three Qubit Gate, all labelled, first slice, check_method.") { + GIVEN("Three Qubit Gate, all labelled, first slice, 
routing_method false.") { Circuit circ(5); std::vector qubits = circ.all_qubits(); circ.add_op(OpType::CX, {qubits[0], qubits[4]}); @@ -564,7 +565,7 @@ SCENARIO("Test LexiLabellingMethod") { std::shared_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; - REQUIRE(!lrm.check_method(mf, shared_arc)); + REQUIRE(!lrm.routing_method(mf, shared_arc).first); } GIVEN("One unlabelled qubit, one slice, check and route.") { Circuit circ(5); @@ -579,8 +580,8 @@ SCENARIO("Test LexiLabellingMethod") { VertPort pre_label = mf->quantum_boundary->get().find(qubits[3])->second; LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); REQUIRE( mf->quantum_boundary->get().find(qubits[3]) == mf->quantum_boundary->get().end()); @@ -605,8 +606,9 @@ SCENARIO("Test LexiLabellingMethod") { VertPort pre_label = mf->quantum_boundary->get().find(qubits[2])->second; LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); + + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); REQUIRE( mf->quantum_boundary->get().find(qubits[2]) == mf->quantum_boundary->get().end()); @@ -630,8 +632,8 @@ SCENARIO("Test LexiLabellingMethod") { VertPort pre_label_3 = mf->quantum_boundary->get().find(qubits[3])->second; LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); REQUIRE( mf->quantum_boundary->get().find(qubits[0]) == mf->quantum_boundary->get().end()); @@ -662,8 +664,8 @@ SCENARIO("Test LexiLabellingMethod") { VertPort pre_label_3 = mf->quantum_boundary->get().find(qubits[3])->second; LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); REQUIRE( mf->quantum_boundary->get().find(qubits[2]) == mf->quantum_boundary->get().end()); @@ -696,8 +698,8 @@ SCENARIO("Test LexiLabellingMethod") { VertPort pre_label_3 = mf->quantum_boundary->get().find(qubits[3])->second; LexiLabellingMethod lrm; - REQUIRE(lrm.check_method(mf, shared_arc)); - lrm.routing_method(mf, shared_arc); + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); REQUIRE( mf->quantum_boundary->get().find(qubits[2]) == mf->quantum_boundary->get().end()); @@ -762,10 +764,10 @@ SCENARIO("Test LexiRouteRoutingMethod") { std::shared_ptr mf = std::make_shared(circ); LexiRouteRoutingMethod lrrm(100); - REQUIRE(lrrm.check_method(mf, shared_arc)); - - unit_map_t init_map = lrrm.routing_method(mf, shared_arc); - REQUIRE(init_map.size() == 0); + std::pair bool_init_map = + lrrm.routing_method(mf, shared_arc); + REQUIRE(bool_init_map.first); + REQUIRE(bool_init_map.second.size() == 0); std::vector commands = mf->circuit_.get_commands(); REQUIRE(commands.size() == 9); @@ -804,8 +806,10 @@ SCENARIO("Test LexiRouteRoutingMethod") { std::shared_ptr mf = std::make_shared(circ); LexiRouteRoutingMethod lrrm(100); - unit_map_t init_map = lrrm.routing_method(mf, shared_arc); - REQUIRE(init_map.size() == 0); + std::pair bool_init_map = + lrrm.routing_method(mf, shared_arc); + REQUIRE(bool_init_map.first); + REQUIRE(bool_init_map.second.size() == 0); std::vector commands = mf->circuit_.get_commands(); REQUIRE(commands.size() == 10); Command swap_c = commands[0]; @@ -870,8 +874,6 @@ SCENARIO("Test MappingManager with 
LexiRouteRoutingMethod and LexiLabelling") { std::make_shared(lrm), std::make_shared()}; - REQUIRE(vrm[0]->check_method(mf, shared_arc)); - bool res = mm.route_circuit(circ, vrm); PredicatePtr routed_correctly = diff --git a/tket/tests/test_MappingManager.cpp b/tket/tests/test_MappingManager.cpp index 6e9c03a4e9..17edafbead 100644 --- a/tket/tests/test_MappingManager.cpp +++ b/tket/tests/test_MappingManager.cpp @@ -25,12 +25,6 @@ class TokenSwappingTester : public RoutingMethod { public: TokenSwappingTester(){}; - bool check_method( - const std::shared_ptr& /*mapping_frontier*/, - const ArchitecturePtr& /*architecture*/) const { - return true; - } - /** * @param mapping_frontier Contains boundary of routed/unrouted circuit for * modifying @@ -38,11 +32,11 @@ class TokenSwappingTester : public RoutingMethod { * @return Logical to Physical mapping at boundary due to modification. * */ - unit_map_t routing_method( + std::pair routing_method( std::shared_ptr& /*mapping_frontier*/, const ArchitecturePtr& /*architecture*/) const { Node node0("test_node", 0), node1("test_node", 1), node2("test_node", 2); - return {{node0, node1}, {node1, node2}, {node2, node0}}; + return {true, {{node0, node1}, {node1, node2}, {node2, node0}}}; } }; diff --git a/tket/tests/test_MappingVerification.cpp b/tket/tests/test_MappingVerification.cpp index 915ef5c7df..1bb3439d18 100644 --- a/tket/tests/test_MappingVerification.cpp +++ b/tket/tests/test_MappingVerification.cpp @@ -34,6 +34,7 @@ SCENARIO( LinePlacement lp_obj(test_arc); lp_obj.place(circ); MappingManager mm(std::make_shared(test_arc)); + REQUIRE( mm.route_circuit(circ, {std::make_shared()})); CHECK(respects_connectivity_constraints(circ, test_arc, false)); diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index bb08813a73..0921382563 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -278,10 +278,11 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); MultiGateReorderRoutingMethod mrrm; - REQUIRE(mrrm.check_method(mf, shared_arc)); - unit_map_t init_map = mrrm.routing_method(mf, shared_arc); - REQUIRE(init_map.size() == 0); + std::pair bool_init_map = + mrrm.routing_method(mf, shared_arc); + REQUIRE(bool_init_map.first); + REQUIRE(bool_init_map.second.size() == 0); std::vector commands = circ.get_commands(); for (unsigned i = 0; i < 5; i++) { std::vector nodes; @@ -302,10 +303,11 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { std::make_shared(circ2); mf2->advance_frontier_boundary(shared_arc); MultiGateReorderRoutingMethod mrrm2(4, 4); - REQUIRE(mrrm2.check_method(mf2, shared_arc)); - unit_map_t init_map2 = mrrm2.routing_method(mf2, shared_arc); - REQUIRE(init_map2.size() == 0); + std::pair bool_init_map2 = + mrrm2.routing_method(mf2, shared_arc); + REQUIRE(bool_init_map2.first); + REQUIRE(bool_init_map2.second.size() == 0); std::vector commands2 = circ2.get_commands(); for (unsigned i = 0; i < 4; i++) { std::vector nodes; @@ -387,9 +389,7 @@ SCENARIO("Test JSON serialisation for MultiGateReorderRoutingMethod") { GIVEN("RoutingMethod vector") { nlohmann::json j_rms = { - {{"name", "MultiGateReorderRoutingMethod"}, - {"depth", 3}, - {"size", 4}}, + {{"name", "MultiGateReorderRoutingMethod"}, {"depth", 3}, {"size", 4}}, { {"name", "LexiRouteRoutingMethod"}, {"depth", 3}, diff --git a/tket/tests/test_RoutingMethod.cpp b/tket/tests/test_RoutingMethod.cpp index 3cc51cbb0a..6c95b0fe05 100644 --- 
a/tket/tests/test_RoutingMethod.cpp +++ b/tket/tests/test_RoutingMethod.cpp @@ -15,87 +15,90 @@ SCENARIO("Test RoutingMethod default methods.") { ArchitecturePtr shared_arc = std::make_shared(arc); Circuit circ(3); std::shared_ptr mf = std::make_shared(circ); - REQUIRE(!rm.check_method(mf, shared_arc)); unit_map_t empty; - REQUIRE(rm.routing_method(mf, shared_arc) == empty); + std::pair rm_return = rm.routing_method(mf, shared_arc); + REQUIRE(!rm_return.first); + REQUIRE(rm_return.second == empty); } -// These two method are not completely reflective of what is necessary for -// routing Their design is to minimally test the required features of the -// methods, not to actually succesfully route a circuit -bool test_check_method(const Circuit& c, const ArchitecturePtr& a) { +std::tuple +test_routing_method_mf_swap_perm(const Circuit& c, const ArchitecturePtr& a) { if (c.n_qubits() > 2 && a->n_nodes() > 2) { - return true; + Circuit copy(c); + std::vector qs = copy.all_qubits(); + std::vector ns = a->get_all_nodes_vec(); + // enforce in tests that ns >= qs, this is testing purposes only so fine... + unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + MappingFrontier mf(copy); + // n.b. add_swap permutes out edge of both boundaries, + mf.add_swap(Node("t", 0), Node("t", 1)); + + return {true, copy, rename_map, final_map}; } else { - return false; + return {false, Circuit(), {}, {}}; } } -std::tuple test_routing_method_mf_swap_perm( +std::tuple +test_routing_method_mf_swap_no_perm( const Circuit& c, const ArchitecturePtr& a) { - Circuit copy(c); - std::vector qs = copy.all_qubits(); - std::vector ns = a->get_all_nodes_vec(); - // enforce in tests that ns >= qs, this is testing purposes only so fine... - unit_map_t rename_map, final_map; - for (unsigned i = 0; i < qs.size(); i++) { - rename_map.insert({qs[i], ns[i]}); - final_map.insert({ns[i], ns[i]}); - } - copy.rename_units(rename_map); - MappingFrontier mf(copy); - // n.b. add_swap permutes out edge of both boundaries, - mf.add_swap(Node("t", 0), Node("t", 1)); - - return std::make_tuple(copy, rename_map, final_map); -} - -std::tuple test_routing_method_mf_swap_no_perm( - const Circuit& c, const ArchitecturePtr& a) { - Circuit copy(c); - std::vector qs = copy.all_qubits(); - std::vector ns = a->get_all_nodes_vec(); - // enforce in tests that ns >= qs, this is testing purposes only so fine... - unit_map_t rename_map, final_map; - for (unsigned i = 0; i < qs.size(); i++) { - rename_map.insert({qs[i], ns[i]}); - final_map.insert({ns[i], ns[i]}); + if (c.n_qubits() > 2 && a->n_nodes() > 2) { + Circuit copy(c); + std::vector qs = copy.all_qubits(); + std::vector ns = a->get_all_nodes_vec(); + // enforce in tests that ns >= qs, this is testing purposes only so fine... + unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + MappingFrontier mf(copy); + // n.b. add_swap permutes out edge of both boundaries, + mf.add_swap(Node("t", 0), Node("t", 1)); + final_map[Node("t", 0)] = Node("t", 1); + final_map[Node("t", 1)] = Node("t", 0); + + return {true, copy, rename_map, final_map}; + } else { + return {false, Circuit(), {}, {}}; } - copy.rename_units(rename_map); - MappingFrontier mf(copy); - // n.b. 
add_swap permutes out edge of both boundaries, - mf.add_swap(Node("t", 0), Node("t", 1)); - final_map[Node("t", 0)] = Node("t", 1); - final_map[Node("t", 1)] = Node("t", 0); - - return std::make_tuple(copy, rename_map, final_map); } -std::tuple test_routing_method_circuit_no_perm( +std::tuple +test_routing_method_circuit_no_perm( const Circuit& c, const ArchitecturePtr& a) { - Circuit copy(c.n_qubits()); - copy.add_op(OpType::SWAP, {0, 1}); - copy.add_op(OpType::CX, {1, 0}); - copy.add_op(OpType::CX, {1, 0}); - - std::vector qs = copy.all_qubits(); - std::vector ns = a->get_all_nodes_vec(); - // enforce in tests that ns >= qs, this is testing purposes only so fine... - unit_map_t rename_map, final_map; - for (unsigned i = 0; i < qs.size(); i++) { - rename_map.insert({qs[i], ns[i]}); - final_map.insert({ns[i], ns[i]}); + if (c.n_qubits() > 2 && a->n_nodes() > 2) { + Circuit copy(c.n_qubits()); + copy.add_op(OpType::SWAP, {0, 1}); + copy.add_op(OpType::CX, {1, 0}); + copy.add_op(OpType::CX, {1, 0}); + + std::vector qs = copy.all_qubits(); + std::vector ns = a->get_all_nodes_vec(); + // enforce in tests that ns >= qs, this is testing purposes only so fine... + unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + MappingFrontier mf(copy); + final_map[Node("t", 0)] = Node("t", 1); + final_map[Node("t", 1)] = Node("t", 0); + return {true, copy, rename_map, final_map}; + } else { + return {false, Circuit(), {}, {}}; } - copy.rename_units(rename_map); - MappingFrontier mf(copy); - final_map[Node("t", 0)] = Node("t", 1); - final_map[Node("t", 1)] = Node("t", 0); - return std::make_tuple(copy, rename_map, final_map); } -SCENARIO("Test RoutingMethodCircuit::check_method") { - RoutingMethodCircuit rmc( - test_routing_method_mf_swap_no_perm, test_check_method, 5, 5); +SCENARIO("Test RoutingMethodCircuit checking criteria") { + RoutingMethodCircuit rmc(test_routing_method_mf_swap_no_perm, 5, 5); Circuit c(2), circ3(3); c.add_op(OpType::CX, {0, 1}); circ3.add_op(OpType::CX, {0, 2}); @@ -108,27 +111,29 @@ SCENARIO("Test RoutingMethodCircuit::check_method") { {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); - REQUIRE(!rmc.check_method(mf2, shared_arc)); - REQUIRE(rmc.check_method(mf3, shared_arc)); + std::pair res0 = rmc.routing_method(mf2, shared_arc); + REQUIRE(!res0.first); + std::pair res1 = rmc.routing_method(mf3, shared_arc); + REQUIRE(res1.first); } -SCENARIO("Test RoutingMethodCircuit::route_method") { - Circuit comp(2); +SCENARIO("Test RoutingMethodCircuit::routing_method") { + Circuit comp(3); comp.add_op(OpType::SWAP, {0, 1}); comp.add_op(OpType::CX, {1, 0}); comp.add_op(OpType::CX, {1, 0}); comp.add_op(OpType::CX, {1, 0}); comp.add_op(OpType::CX, {1, 0}); auto qbs = comp.all_qubits(); - unit_map_t rename_map = {{qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}}; + unit_map_t rename_map = { + {qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}, {qbs[2], Node("t", 2)}}; comp.rename_units(rename_map); qubit_map_t permutation = { {Node("t", 0), Node("t", 1)}, {Node("t", 1), Node("t", 0)}}; comp.permute_boundary_output(permutation); GIVEN("Non-implicit Permutation method, using MappingFrontier::add_swap") { - RoutingMethodCircuit rmc( - test_routing_method_mf_swap_no_perm, test_check_method, 2, 2); - Circuit c(2); + RoutingMethodCircuit rmc(test_routing_method_mf_swap_no_perm, 2, 2); + Circuit c(3); 
c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); @@ -138,14 +143,15 @@ SCENARIO("Test RoutingMethodCircuit::route_method") { Architecture arc( {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); - unit_map_t output = rmc.routing_method(mf, shared_arc), empty; - REQUIRE(output == empty); + std::pair output = rmc.routing_method(mf, shared_arc); + unit_map_t empty; + REQUIRE(output.first); + REQUIRE(output.second == empty); REQUIRE(c == comp); } GIVEN("Non-implicit Permutation method, using circuit replacement") { - RoutingMethodCircuit rmc( - test_routing_method_circuit_no_perm, test_check_method, 2, 2); - Circuit c(2); + RoutingMethodCircuit rmc(test_routing_method_circuit_no_perm, 2, 2); + Circuit c(3); c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); @@ -155,14 +161,15 @@ SCENARIO("Test RoutingMethodCircuit::route_method") { Architecture arc( {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); - unit_map_t output = rmc.routing_method(mf, shared_arc), empty; - REQUIRE(output == empty); + std::pair output = rmc.routing_method(mf, shared_arc); + unit_map_t empty; + REQUIRE(output.first); + REQUIRE(output.second == empty); REQUIRE(c == comp); } GIVEN("Implicit Permutation method, using MappingFrontier::add_swap") { - RoutingMethodCircuit rmc( - test_routing_method_mf_swap_perm, test_check_method, 2, 2); - Circuit c(2); + RoutingMethodCircuit rmc(test_routing_method_mf_swap_perm, 2, 2); + Circuit c(3); c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); @@ -172,17 +179,20 @@ SCENARIO("Test RoutingMethodCircuit::route_method") { Architecture arc( {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); - unit_map_t output = rmc.routing_method(mf, shared_arc), empty; - REQUIRE(output == empty); + std::pair output = rmc.routing_method(mf, shared_arc); + unit_map_t empty; + REQUIRE(output.first); + REQUIRE(output.second == empty); - Circuit comp1(2); + Circuit comp1(3); comp1.add_op(OpType::SWAP, {0, 1}); comp1.add_op(OpType::CX, {1, 0}); comp1.add_op(OpType::CX, {1, 0}); comp1.add_op(OpType::CX, {0, 1}); comp1.add_op(OpType::CX, {0, 1}); qbs = comp1.all_qubits(); - rename_map = {{qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}}; + rename_map = { + {qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}, {qbs[2], Node("t", 2)}}; comp1.rename_units(rename_map); REQUIRE(c == comp1); diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp index 642c0c1f33..2986fb182d 100644 --- a/tket/tests/test_RoutingPasses.cpp +++ b/tket/tests/test_RoutingPasses.cpp @@ -449,8 +449,8 @@ SCENARIO( // circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); // circ.add_conditional_gate( // OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); - // circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); - // circ.add_conditional_gate( + // circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, + // 1); circ.add_conditional_gate( // OpType::CX, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); // circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); // } diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index ce832b2506..2ad677c57d 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -430,9 +430,13 @@ SCENARIO("Test RoutingMethod serializations") { RoutingMethod 
loaded_rm_j = rm_j.get(); Circuit c(2, 2); - CHECK(!loaded_rm_j.check_method( - std::make_shared(c), - std::make_shared(2, 2))); + c.add_op(OpType::CX, {0, 1}); + + MappingFrontier mf(c); + std::shared_ptr mf_sp = + std::make_shared(mf); + CHECK(!loaded_rm_j.routing_method(mf_sp, std::make_shared(2, 2)) + .first); std::vector rmp = { std::make_shared(rm), @@ -442,15 +446,12 @@ SCENARIO("Test RoutingMethod serializations") { nlohmann::json rmp_j = rmp; std::vector loaded_rmp_j = rmp_j.get>(); - CHECK(!loaded_rmp_j[0]->check_method( - std::make_shared(c), - std::make_shared(2, 2))); - CHECK(!loaded_rmp_j[1]->check_method( - std::make_shared(c), - std::make_shared(2, 2))); - CHECK(loaded_rmp_j[2]->check_method( - std::make_shared(c), - std::make_shared(2, 2))); + CHECK(!loaded_rmp_j[0] + ->routing_method(mf_sp, std::make_shared(2, 2)) + .first); + CHECK(loaded_rmp_j[1] + ->routing_method(mf_sp, std::make_shared(2, 2)) + .first); } SCENARIO("Test predicate serializations") { @@ -622,6 +623,7 @@ SCENARIO("Test compiler pass serializations") { COMPPASSJSONTEST(PlacementPass, gen_placement_pass(place)) // TKET-1419 COMPPASSJSONTEST(NoiseAwarePlacement, gen_placement_pass(na_place)) + COMPPASSJSONTEST(NaivePlacementPass, gen_naive_placement_pass(arc)) #undef COMPPASSJSONTEST GIVEN("RoutingPass") { // Can only be applied to placed circuits diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 1d4b71a2b6..caace6107c 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -20,18 +20,10 @@ set(TEST_SOURCES # We should test simpler modules (e.g. Op, Circuit) before # the more complicated things that rely on them (e.g. Routing, # Transform) to help identify exactly where stuff breaks - ${TKET_TESTS_DIR}/tests_main.cpp - ${TKET_TESTS_DIR}/testutil.cpp - ${TKET_TESTS_DIR}/CircuitsForTesting.cpp ${TKET_TESTS_DIR}/Utils/test_CosSinDecomposition.cpp ${TKET_TESTS_DIR}/Utils/test_HelperFunctions.cpp ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp ${TKET_TESTS_DIR}/Utils/test_RNG.cpp - ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp - ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp - ${TKET_TESTS_DIR}/Graphs/GraphTestingRoutines.cpp - ${TKET_TESTS_DIR}/Graphs/RandomGraphGeneration.cpp - ${TKET_TESTS_DIR}/Graphs/RandomPlanarGraphs.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphColouring.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphFindComponents.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphFindMaxClique.cpp @@ -42,25 +34,11 @@ set(TEST_SOURCES # NOTE: For testing TokenSwapping, it is easier to make use of # Architecture to set up test problems, rather than trying # to separate TokenSwapping-without-Architecture tests. 
- ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedCompleteSolutions.cpp - ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedSwapSequences.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/NeighboursFromEdges.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/PermutationTestUtils.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/BestTsaTester.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DebugFunctions.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DecodedProblemData.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/FullTsaTesting.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/GetRandomSet.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ProblemGeneration.cpp - ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/TestStatsStructs.cpp ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/test_DebugFunctions.cpp ${TKET_TESTS_DIR}/TokenSwapping/TSAUtils/test_SwapFunctions.cpp ${TKET_TESTS_DIR}/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -78,9 +56,7 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/Ops/test_ClassicalOps.cpp ${TKET_TESTS_DIR}/Ops/test_Expression.cpp ${TKET_TESTS_DIR}/Ops/test_Ops.cpp - ${TKET_TESTS_DIR}/Gate/GatesData.cpp ${TKET_TESTS_DIR}/Gate/test_GateUnitaryMatrix.cpp - ${TKET_TESTS_DIR}/Simulation/ComparisonFunctions.cpp ${TKET_TESTS_DIR}/Simulation/test_CircuitSimulator.cpp ${TKET_TESTS_DIR}/Simulation/test_PauliExpBoxUnitaryCalculator.cpp ${TKET_TESTS_DIR}/Circuit/test_Boxes.cpp @@ -94,7 +70,8 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_PauliGraph.cpp ${TKET_TESTS_DIR}/test_Architectures.cpp ${TKET_TESTS_DIR}/test_ArchitectureAwareSynthesis.cpp - ${TKET_TESTS_DIR}/test_Placement.cpp + ${TKET_TESTS_DIR}/Placement/test_Placement.cpp + ${TKET_TESTS_DIR}/Placement/test_NeighbourPlacements.cpp ${TKET_TESTS_DIR}/test_MappingVerification.cpp ${TKET_TESTS_DIR}/test_MappingFrontier.cpp ${TKET_TESTS_DIR}/test_RoutingMethod.cpp @@ -127,4 +104,5 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/ZX/test_ZXDiagram.cpp ${TKET_TESTS_DIR}/ZX/test_ZXAxioms.cpp ${TKET_TESTS_DIR}/ZX/test_ZXSimp.cpp + ${TKET_TESTS_DIR}/ZX/test_Flow.cpp ) diff --git a/tket/tests/tkettestutilsfiles.cmake b/tket/tests/tkettestutilsfiles.cmake new file mode 100644 index 0000000000..bedc380ec1 --- /dev/null +++ b/tket/tests/tkettestutilsfiles.cmake @@ -0,0 +1,43 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# file to store all the files that serve as utils for the tket unit tests +# new files should be added here + +set(TESTUTILS_SOURCES + ${TKET_TESTS_DIR}/tests_main.cpp + ${TKET_TESTS_DIR}/testutil.cpp + ${TKET_TESTS_DIR}/CircuitsForTesting.cpp + ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp + ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp + ${TKET_TESTS_DIR}/Graphs/GraphTestingRoutines.cpp + ${TKET_TESTS_DIR}/Graphs/RandomGraphGeneration.cpp + ${TKET_TESTS_DIR}/Graphs/RandomPlanarGraphs.cpp + ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedCompleteSolutions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedSwapSequences.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/NeighboursFromEdges.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/PermutationTestUtils.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/BestTsaTester.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DebugFunctions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DecodedProblemData.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/FullTsaTesting.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/GetRandomSet.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ProblemGeneration.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/TestStatsStructs.cpp + ${TKET_TESTS_DIR}/Gate/GatesData.cpp + ${TKET_TESTS_DIR}/Simulation/ComparisonFunctions.cpp +) From 80914ba405543ce378f2f61ac3c581fb22b8b968 Mon Sep 17 00:00:00 2001 From: cqc-melf <70640934+cqc-melf@users.noreply.github.com> Date: Fri, 25 Feb 2022 18:36:39 +0100 Subject: [PATCH 141/146] [TKET-1778] add architecture operation valid changes (#248) * compiler pytket * add tests and checks in python * black format python * update format c++ * c++ format * get optype only once * remove optype from valid_operation * remove optype deps * remove 3 node case --- pytket/binders/architecture.cpp | 8 +++ pytket/tests/architecture_test.py | 27 ++++++++- tket/src/Architecture/Architecture.cpp | 25 ++------- tket/src/Architecture/CMakeLists.txt | 4 -- .../include/Architecture/Architecture.hpp | 3 +- tket/src/Mapping/MappingFrontier.cpp | 55 ++++++++++++++++++- tket/src/Mapping/MultiGateReorder.cpp | 4 +- .../include/Mapping/MappingFrontier.hpp | 11 ++++ tket/tests/test_MultiGateReorder.cpp | 29 ++++++---- 9 files changed, 123 insertions(+), 43 deletions(-) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp index 823bc1455d..10c46ee3a1 100644 --- a/pytket/binders/architecture.cpp +++ b/pytket/binders/architecture.cpp @@ -19,8 +19,11 @@ #include #include +#include "Circuit/Circuit.hpp" #include "Utils/Json.hpp" #include "binder_json.hpp" +#include "binder_utils.hpp" +#include "typecast.hpp" namespace py = pybind11; using json = nlohmann::json; @@ -56,6 +59,11 @@ PYBIND11_MODULE(architecture, m) { "given two nodes in Architecture, " "returns distance between them", py::arg("node_0"), py::arg("node_1")) + .def( + "valid_operation", &Architecture::valid_operation, + "Returns true if the given operation acting on the given ", + " nodes can be executed on the Architecture connectivity graph.", + py::arg("uids")) .def( "get_adjacent_nodes", &Architecture::get_neighbour_nodes, "given a node, returns adjacent nodes in Architecture.", diff --git a/pytket/tests/architecture_test.py b/pytket/tests/architecture_test.py index 7d476f6426..804a28bf82 100644 --- 
a/pytket/tests/architecture_test.py +++ b/pytket/tests/architecture_test.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pytket.circuit import Node # type: ignore +from pytket.circuit import Node, Op, OpType, Circuit, Qubit, PhasePolyBox # type: ignore from pytket.architecture import Architecture, SquareGrid, FullyConnected # type: ignore +import numpy as np def test_architectures() -> None: @@ -78,8 +79,32 @@ def test_arch_types() -> None: assert isinstance(sg, SquareGrid) +def test_valid_operation() -> None: + edges = [(0, 1), (1, 2), (2, 0), (0, 3), (3, 4), (4, 5), (5, 6)] + arc = Architecture(edges) + + assert arc.valid_operation([Node(0), Node(1)]) + assert not arc.valid_operation([Node(1), Node(3)]) + assert arc.valid_operation([Node(0)]) + + assert arc.valid_operation([Node(0)]) + assert arc.valid_operation([Node(0), Node(1)]) + assert not arc.valid_operation([Node(0), Node(1), Node(2)]) + + assert arc.valid_operation([Node(0)]) + assert not arc.valid_operation([Node(10)]) + assert not arc.valid_operation([Node(10), Node(11), Node(15)]) + assert not arc.valid_operation([Node(0), Node(1), Node(2), Node(3)]) + assert not arc.valid_operation([Node(0), Node(4)]) + assert not arc.valid_operation([Node(0), Node(1), Node(2)]) + assert arc.valid_operation([Node(0), Node(1)]) + assert not arc.valid_operation([Node(0), Node(1), Node(2)]) + assert not arc.valid_operation([Node(0), Node(1), Node(4)]) + + if __name__ == "__main__": test_architectures() test_architecture_eq() test_fully_connected() test_arch_types() + test_valid_operation() diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index db925c99b0..fc577e5f5d 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -18,37 +18,20 @@ #include #include -#include "Circuit/Conditional.hpp" #include "Graphs/ArticulationPoints.hpp" #include "Utils/Json.hpp" #include "Utils/UnitID.hpp" namespace tket { -// basic implementation that works off same prior assumptions -// TODO: Update this for more mature systems of multi-qubit gates -bool Architecture::valid_operation( - const Op_ptr& op, const std::vector& uids) const { - if (op->get_desc().is_box() || - (op->get_type() == OpType::Conditional && - static_cast(*op).get_op()->get_desc().is_box())) { - return false; +bool Architecture::valid_operation(const std::vector& uids) const { + for (Node n : uids) { + if (!this->node_exists(Node(n))) return false; } if (uids.size() == 1) { - // with current Architecture can assume all single qubit gates valid - return true; - } else if (op->get_type() == OpType::Barrier) { return true; } else if (uids.size() == 2) { - if (this->node_exists(uids[0]) && this->node_exists(uids[1]) && - this->bidirectional_edge_exists(uids[0], uids[1])) { - return true; - } - } else if (uids.size() == 3 && op->get_type() == OpType::BRIDGE) { - bool con_0_exists = this->bidirectional_edge_exists(uids[0], uids[1]); - bool con_1_exists = this->bidirectional_edge_exists(uids[2], uids[1]); - if (this->node_exists(uids[0]) && this->node_exists(uids[1]) && - this->node_exists(uids[2]) && con_0_exists && con_1_exists) { + if (this->bidirectional_edge_exists(uids[0], uids[1])) { return true; } } diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index f69f7a7f8f..ff18aaaa3d 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -27,11 +27,7 @@ 
add_library(tket-${COMP} NeighboursFromArchitecture.cpp) list(APPEND DEPS_${COMP} - Circuit - OpType - Ops Graphs - OpType TokenSwapping Utils) diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index e20ea15873..8015634e50 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -23,7 +23,6 @@ #include "Graphs/CompleteGraph.hpp" #include "Graphs/DirectedGraph.hpp" -#include "Ops/OpPtr.hpp" #include "Utils/BiMapHeaders.hpp" #include "Utils/EigenConfig.hpp" #include "Utils/Json.hpp" @@ -107,7 +106,7 @@ class Architecture : public ArchitectureBase> { * Returns true if the given operation acting on the given nodes * can be executed on the Architecture connectivity graph. */ - bool valid_operation(const Op_ptr &op, const std::vector &uids) const; + bool valid_operation(const std::vector &uids) const; /** * Sub-architecture generated by a subset of nodes. diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 9e9fb9f3ea..5490792cb5 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -242,8 +242,9 @@ void MappingFrontier::advance_frontier_boundary( for (const UnitID& uid : uids) { nodes.push_back(Node(uid)); } - if (architecture->valid_operation( - this->circuit_.get_Op_ptr_from_Vertex(vert), nodes)) { + if (this->valid_boundary_operation( + architecture, this->circuit_.get_Op_ptr_from_Vertex(vert), + nodes)) { // if no valid operation, boundary not updated and while loop terminates boundary_updated = true; for (const UnitID& uid : uids) { @@ -585,4 +586,54 @@ void MappingFrontier::merge_ancilla( this->bimaps_->final.left.erase(merge); } +bool MappingFrontier::valid_boundary_operation( + const ArchitecturePtr& architecture, const Op_ptr& op, + const std::vector& uids) const { + // boxes are never allowed + OpType ot = op->get_type(); + if (is_box_type(ot)) { + return false; + } + + if (ot == OpType::Conditional) { + OpType cond_ot = static_cast(*op).get_op()->get_type(); + // conditional boxes are never allowed, too + if (is_box_type(cond_ot)) { + return false; + } + } + + // Barriers are always allowed + if (ot == OpType::Barrier) { + return true; + } + + // this currently allows unplaced single qubit gates + // this should be changed in the future + if (uids.size() == 1) { + return true; + } + + // allow two qubit gates only for placed and connected nodes + if (uids.size() == 2) { + if (architecture->node_exists(uids[0]) && + architecture->node_exists(uids[1]) && + architecture->bidirectional_edge_exists(uids[0], uids[1])) { + return true; + } + } else if (uids.size() == 3 && ot == OpType::BRIDGE) { + bool con_0_exists = + architecture->bidirectional_edge_exists(uids[0], uids[1]); + bool con_1_exists = + architecture->bidirectional_edge_exists(uids[2], uids[1]); + if (architecture->node_exists(uids[0]) && + architecture->node_exists(uids[1]) && + architecture->node_exists(uids[2]) && con_0_exists && con_1_exists) { + return true; + } + } + + return false; +} + } // namespace tket diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 27f521d962..4cad6a9a7d 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -67,8 +67,8 @@ static bool is_physically_permitted( for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { 
nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); } - return arc_ptr->valid_operation( - frontier->circuit_.get_Op_ptr_from_Vertex(vert), nodes); + return frontier->valid_boundary_operation( + arc_ptr, frontier->circuit_.get_Op_ptr_from_Vertex(vert), nodes); } // This method will try to commute a vertex to the quantum frontier diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp index 5fdbc695fc..5e1e5daee1 100644 --- a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -179,6 +179,17 @@ struct MappingFrontier { * @param new_boundary Object to reassign with. */ void set_quantum_boundary(const unit_vertport_frontier_t& new_boundary); + + /** + * Returns true if the given operation acting on the given nodes + * can be executed on the Architecture connectivity graph. + * @param architecture given architecture to check the operation on + * @param op operation to check + * @param uids vector of nodes which is included in the operation + */ + bool valid_boundary_operation( + const ArchitecturePtr& architecture, const Op_ptr& op, + const std::vector& uids) const; }; } // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 0921382563..61fb5c9a86 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -57,7 +57,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -98,7 +99,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -144,7 +146,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -191,7 +194,8 @@ SCENARIO("Reorder circuits") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -232,11 +236,11 @@ SCENARIO("Reorder circuits with limited search space") { mr.solve(3, 3); // Check only the first valid CZ get commuted to the front std::vector commands = circ.get_commands(); - REQUIRE(shared_arc->valid_operation( - commands[0].get_op_ptr(), + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[0].get_op_ptr(), {Node(commands[0].get_args()[0]), Node(commands[0].get_args()[1])})); - REQUIRE(!shared_arc->valid_operation( - commands[0].get_op_ptr(), + REQUIRE(!mf->valid_boundary_operation( + shared_arc, 
commands[0].get_op_ptr(), {Node(commands[1].get_args()[0]), Node(commands[1].get_args()[1])})); const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -289,7 +293,8 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands[i].get_op_ptr(), nodes)); + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); } const auto u = tket_sim::get_unitary(circ); const auto u1 = tket_sim::get_unitary(circ_copy); @@ -314,13 +319,15 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { for (auto arg : commands2[i].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(shared_arc->valid_operation(commands2[i].get_op_ptr(), nodes)); + REQUIRE(mf2->valid_boundary_operation( + shared_arc, commands2[i].get_op_ptr(), nodes)); } std::vector nodes; for (auto arg : commands2[4].get_args()) { nodes.push_back(Node(arg)); } - REQUIRE(!shared_arc->valid_operation(commands2[4].get_op_ptr(), nodes)); + REQUIRE(!mf2->valid_boundary_operation( + shared_arc, commands2[4].get_op_ptr(), nodes)); const auto u2 = tket_sim::get_unitary(circ2); REQUIRE(tket_sim::compare_statevectors_or_unitaries( u2, u1, tket_sim::MatrixEquivalence::EQUAL)); From cefa92c71e6d76825aec22cb1154a82782666f70 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 28 Feb 2022 09:12:49 +0000 Subject: [PATCH 142/146] Update build_and_test.yml --- .github/workflows/build_and_test.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 67f255b11f..522c5f2e41 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -5,11 +5,9 @@ on: branches: - main - develop - - feature/RV3.1 push: branches: - develop - - feature/RV3.1 schedule: # 03:00 every Saturday morning - cron: '0 3 * * 6' From 967bcf075c2fd238ee946468779bdebe1d07f562 Mon Sep 17 00:00:00 2001 From: sjdilkes Date: Mon, 28 Feb 2022 14:34:14 +0000 Subject: [PATCH 143/146] address PR comments --- pytket/binders/architecture.cpp | 3 +- pytket/binders/passes.cpp | 4 +- pytket/docs/changelog.rst | 6 +- pytket/tests/architecture_test.py | 7 -- tket/src/Circuit/macro_circ_info.cpp | 7 +- tket/src/Mapping/BoxDecomposition.cpp | 4 +- tket/src/Mapping/LexiLabelling.cpp | 2 +- tket/src/Mapping/LexiRoute.cpp | 53 ++++++----- tket/src/Mapping/LexiRouteRoutingMethod.cpp | 2 +- tket/src/Mapping/MappingFrontier.cpp | 11 ++- tket/src/Mapping/MappingManager.cpp | 2 +- tket/src/Mapping/MultiGateReorder.cpp | 11 ++- tket/src/Mapping/RoutingMethodCircuit.cpp | 2 +- .../include/Mapping/BoxDecomposition.hpp | 6 +- .../Mapping/include/Mapping/LexiLabelling.hpp | 2 +- .../src/Mapping/include/Mapping/LexiRoute.hpp | 23 +++-- .../Mapping/LexiRouteRoutingMethod.hpp | 2 +- .../include/Mapping/MappingFrontier.hpp | 2 + .../include/Mapping/MultiGateReorder.hpp | 6 +- .../Mapping/include/Mapping/RoutingMethod.hpp | 2 +- .../include/Mapping/RoutingMethodCircuit.hpp | 2 +- tket/tests/test_BoxDecompRoutingMethod.cpp | 9 +- tket/tests/test_LexiRoute.cpp | 89 +++++++------------ tket/tests/test_MappingManager.cpp | 2 +- tket/tests/test_MultiGateReorder.cpp | 24 ++--- tket/tests/test_RoutingMethod.cpp | 13 ++- tket/tests/test_RoutingPasses.cpp | 31 ------- tket/tests/test_json.cpp | 3 +- 28 files changed, 135 insertions(+), 195 deletions(-) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp index 
10c46ee3a1..891a79d6a6 100644 --- a/pytket/binders/architecture.cpp +++ b/pytket/binders/architecture.cpp @@ -62,7 +62,8 @@ PYBIND11_MODULE(architecture, m) { .def( "valid_operation", &Architecture::valid_operation, "Returns true if the given operation acting on the given ", - " nodes can be executed on the Architecture connectivity graph.", + "nodes can be executed on the Architecture connectivity graph." + "\n\n:param uids: UnitID validity is being checked for", py::arg("uids")) .def( "get_adjacent_nodes", &Architecture::get_neighbour_nodes, diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index 5762faad01..b055137001 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -37,7 +37,7 @@ static PassPtr gen_cx_mapping_pass_kwargs( const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { std::vector config = { std::make_shared(), - std::make_shared(100)}; + std::make_shared()}; if (kwargs.contains("config")) { config = py::cast>(kwargs["config"]); } @@ -55,7 +55,7 @@ static PassPtr gen_default_routing_pass(const Architecture &arc) { std::vector config = { std::make_shared(), - std::make_shared(100)}; + std::make_shared()}; return gen_routing_pass(arc, config); } diff --git a/pytket/docs/changelog.rst b/pytket/docs/changelog.rst index 7a210b9b86..9e7b133dba 100644 --- a/pytket/docs/changelog.rst +++ b/pytket/docs/changelog.rst @@ -35,7 +35,11 @@ Minor new features: * New ``pytket.passes.NaivePlacementPass`` which completes a basic relabelling of all Circuit Qubit not labelled as some Architecture Node to any available Architecture Node * Add ``opgroups`` property to ``Circuit``. - +* ``Architecture`` has a new ``valid_operation`` method which returns true if the passed UnitIDs respect + architecture constraints. +* New methods for mapping logical to physical circuits for some ``Architecture``: ``LexiRouteRoutingMethod``, + ``LexiLabellingMethod``, ``MultiGateReorderRoutingMethod``. 
+ 0.19.2 (February 2022) ---------------------- diff --git a/pytket/tests/architecture_test.py b/pytket/tests/architecture_test.py index 804a28bf82..d41740554b 100644 --- a/pytket/tests/architecture_test.py +++ b/pytket/tests/architecture_test.py @@ -83,22 +83,15 @@ def test_valid_operation() -> None: edges = [(0, 1), (1, 2), (2, 0), (0, 3), (3, 4), (4, 5), (5, 6)] arc = Architecture(edges) - assert arc.valid_operation([Node(0), Node(1)]) assert not arc.valid_operation([Node(1), Node(3)]) - assert arc.valid_operation([Node(0)]) - assert arc.valid_operation([Node(0)]) assert arc.valid_operation([Node(0), Node(1)]) assert not arc.valid_operation([Node(0), Node(1), Node(2)]) - - assert arc.valid_operation([Node(0)]) assert not arc.valid_operation([Node(10)]) assert not arc.valid_operation([Node(10), Node(11), Node(15)]) assert not arc.valid_operation([Node(0), Node(1), Node(2), Node(3)]) assert not arc.valid_operation([Node(0), Node(4)]) assert not arc.valid_operation([Node(0), Node(1), Node(2)]) - assert arc.valid_operation([Node(0), Node(1)]) - assert not arc.valid_operation([Node(0), Node(1), Node(2)]) assert not arc.valid_operation([Node(0), Node(1), Node(4)]) diff --git a/tket/src/Circuit/macro_circ_info.cpp b/tket/src/Circuit/macro_circ_info.cpp index 76b35732ec..25f1b9a37a 100644 --- a/tket/src/Circuit/macro_circ_info.cpp +++ b/tket/src/Circuit/macro_circ_info.cpp @@ -531,14 +531,13 @@ CutFrontier Circuit::next_q_cut( for (const std::pair& pair : u_frontier->get()) { Vertex try_v = target(pair.second); if (detect_final_Op(try_v)) continue; - if (next_slice_lookup.find(try_v) != next_slice_lookup.end()) + if (next_slice_lookup.contains(try_v)) continue; // already going to be in next slice - bool good_vertex = bad_vertices.find(try_v) == bad_vertices.end(); + bool good_vertex = !bad_vertices.contains(try_v); if (!good_vertex) continue; EdgeVec ins = get_in_edges(try_v); for (const Edge& in : ins) { - if (edge_lookup.find(in) == edge_lookup.end() && - get_edgetype(in) == EdgeType::Quantum) { + if (!edge_lookup.contains(in) && get_edgetype(in) == EdgeType::Quantum) { good_vertex = false; bad_vertices.insert(try_v); break; diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp index 38d22fce71..efd6bdd365 100644 --- a/tket/src/Mapping/BoxDecomposition.cpp +++ b/tket/src/Mapping/BoxDecomposition.cpp @@ -19,7 +19,7 @@ namespace tket { BoxDecomposition::BoxDecomposition( const ArchitecturePtr &_architecture, - std::shared_ptr &_mapping_frontier) + MappingFrontier_ptr &_mapping_frontier) : architecture_(_architecture), mapping_frontier_(_mapping_frontier) {} bool BoxDecomposition::solve() { @@ -56,7 +56,7 @@ bool BoxDecomposition::solve() { BoxDecompositionRoutingMethod::BoxDecompositionRoutingMethod(){}; std::pair BoxDecompositionRoutingMethod::routing_method( - std::shared_ptr &mapping_frontier, + MappingFrontier_ptr &mapping_frontier, const ArchitecturePtr &architecture) const { BoxDecomposition bd(architecture, mapping_frontier); bool modified = bd.solve(); diff --git a/tket/src/Mapping/LexiLabelling.cpp b/tket/src/Mapping/LexiLabelling.cpp index 7455614bb2..00be47608b 100644 --- a/tket/src/Mapping/LexiLabelling.cpp +++ b/tket/src/Mapping/LexiLabelling.cpp @@ -16,7 +16,7 @@ namespace tket { std::pair LexiLabellingMethod::routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const { LexiRoute lr(architecture, mapping_frontier); return {lr.solve_labelling(), {}}; diff --git 
a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp index adfcf1f8af..f4beb4251a 100644 --- a/tket/src/Mapping/LexiRoute.cpp +++ b/tket/src/Mapping/LexiRoute.cpp @@ -21,7 +21,7 @@ namespace tket { LexiRoute::LexiRoute( const ArchitecturePtr& _architecture, - std::shared_ptr& _mapping_frontier) + MappingFrontier_ptr& _mapping_frontier) : architecture_(_architecture), mapping_frontier_(_mapping_frontier) { // set initial logical->physical labelling for (const Qubit& qb : this->mapping_frontier_->circuit_.all_qubits()) { @@ -178,7 +178,8 @@ bool LexiRoute::update_labelling() { * of UnitID in this->mapping_frontier_ */ bool LexiRoute::set_interacting_uids( - bool assigned_only, bool route_check, bool label_check) { + AssignedOnly assigned_only, CheckRoutingValidity route_check, + CheckLabellingValidity label_check) { // return types this->interacting_uids_.clear(); bool all_placed = true; @@ -214,10 +215,11 @@ bool LexiRoute::set_interacting_uids( this->architecture_->node_exists(Node(jt->first)); if (!node0_exists || !node1_exists || op->get_desc().is_box()) { all_placed = false; - if (route_check) return false; + if (route_check == CheckRoutingValidity::Yes) return false; } - if (!assigned_only || (node0_exists && node1_exists)) { + if (assigned_only == AssignedOnly::No || + (node0_exists && node1_exists)) { interacting_uids_.insert({it->first, jt->first}); interacting_uids_.insert({jt->first, it->first}); } @@ -228,29 +230,16 @@ bool LexiRoute::set_interacting_uids( n_edges > 2 && this->mapping_frontier_->circuit_.get_OpType_from_Vertex(v0) != OpType::Barrier) { - if (label_check) return true; - if (route_check) return false; + if (label_check == CheckLabellingValidity::Yes) return true; + if (route_check == CheckRoutingValidity::Yes) return false; throw LexiRouteError( "LexiRoute only supports non-Barrier vertices with 1 or 2 edges."); } } } - - // conditions for proceeding with labelling - if (label_check) { - if (all_placed) { - return true; - } else { - return false; - } - } - // this should have left early when first found - if (route_check) { - if (all_placed) { - return true; - } else { - return false; - } + if (label_check == CheckLabellingValidity::Yes || + route_check == CheckRoutingValidity::Yes) { + return all_placed; } // => either route_check true and all_placed so valid // or !route_check and !label_check so return true and discard @@ -347,7 +336,9 @@ std::pair LexiRoute::check_bridge( this->mapping_frontier_->advance_next_2qb_slice(lookahead); // true bool means it only sets interacting uids if both uids are in // architecture - this->set_interacting_uids(true); + this->set_interacting_uids( + AssignedOnly::Yes, CheckRoutingValidity::No, + CheckLabellingValidity::No); // if 0, just take first swap rather than place if (this->interacting_uids_.size() == 0) { candidate_swaps = {*candidate_swaps.begin()}; @@ -431,7 +422,8 @@ void LexiRoute::remove_swaps_decreasing(swap_set_t& swaps) { } bool LexiRoute::solve_labelling() { - bool all_labelled = this->set_interacting_uids(false, false, true); + bool all_labelled = this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::No, CheckLabellingValidity::Yes); if (!all_labelled) { this->update_labelling(); this->mapping_frontier_->update_quantum_boundary_uids(this->labelling_); @@ -443,7 +435,8 @@ bool LexiRoute::solve_labelling() { bool LexiRoute::solve(unsigned lookahead) { // work out if valid - bool all_labelled = this->set_interacting_uids(false, true, false); + bool all_labelled = 
this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::Yes, CheckLabellingValidity::No); if (!all_labelled) { return false; } @@ -481,7 +474,9 @@ bool LexiRoute::solve(unsigned lookahead) { this->mapping_frontier_->advance_next_2qb_slice(lookahead); // true bool means it only sets interacting uids if both uids are in // architecture - this->set_interacting_uids(true); + this->set_interacting_uids( + AssignedOnly::Yes, CheckRoutingValidity::No, + CheckLabellingValidity::No); } // find best swap auto it = candidate_swaps.end(); @@ -490,7 +485,8 @@ bool LexiRoute::solve(unsigned lookahead) { std::pair chosen_swap = *it; this->mapping_frontier_->set_quantum_boundary(copy); - this->set_interacting_uids(); + this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::No, CheckLabellingValidity::No); std::pair check = this->check_bridge(chosen_swap, lookahead); // set for final time, to allow gates to be correctly inserted, but then leave // as is @@ -502,7 +498,8 @@ bool LexiRoute::solve(unsigned lookahead) { this->mapping_frontier_->add_swap(chosen_swap.first, chosen_swap.second); } else { // only need to reset in bridge case - this->set_interacting_uids(); + this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::No, CheckLabellingValidity::No); auto add_ordered_bridge = [&](const Node& n) { auto it0 = this->mapping_frontier_->quantum_boundary->find(n); diff --git a/tket/src/Mapping/LexiRouteRoutingMethod.cpp b/tket/src/Mapping/LexiRouteRoutingMethod.cpp index 9ec5b05f1d..672ed670cd 100644 --- a/tket/src/Mapping/LexiRouteRoutingMethod.cpp +++ b/tket/src/Mapping/LexiRouteRoutingMethod.cpp @@ -20,7 +20,7 @@ LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) : max_depth_(_max_depth){}; std::pair LexiRouteRoutingMethod::routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const { LexiRoute lr(architecture, mapping_frontier); return {lr.solve(this->max_depth_), {}}; diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp index 5490792cb5..7ed60d5fde 100644 --- a/tket/src/Mapping/MappingFrontier.cpp +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -596,10 +596,15 @@ bool MappingFrontier::valid_boundary_operation( } if (ot == OpType::Conditional) { - OpType cond_ot = static_cast(*op).get_op()->get_type(); + Op_ptr cond_op_ptr = static_cast(*op).get_op(); // conditional boxes are never allowed, too - if (is_box_type(cond_ot)) { - return false; + OpType ot = cond_op_ptr->get_type(); + while (ot == OpType::Conditional) { + cond_op_ptr = static_cast(*op).get_op(); + ot = cond_op_ptr->get_type(); + if (is_box_type(ot)) { + return false; + } } } diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp index 7ae89751af..cf14d25c4d 100644 --- a/tket/src/Mapping/MappingManager.cpp +++ b/tket/src/Mapping/MappingManager.cpp @@ -43,7 +43,7 @@ bool MappingManager::route_circuit_with_maps( // mapping_frontier tracks boundary between routed & un-routed in circuit // when initialised, boundary is over output edges of input vertices - std::shared_ptr mapping_frontier; + MappingFrontier_ptr mapping_frontier; if (maps) { mapping_frontier = std::make_shared(circuit, maps); } else { diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp index 4cad6a9a7d..9ed429c7a5 100644 --- a/tket/src/Mapping/MultiGateReorder.cpp +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -20,7 +20,7 @@ namespace tket { 
MultiGateReorder::MultiGateReorder( const ArchitecturePtr &_architecture, - std::shared_ptr &_mapping_frontier) + MappingFrontier_ptr &_mapping_frontier) : architecture_(_architecture), mapping_frontier_(_mapping_frontier) { // This needs to be updated every time the frontier changes this->u_frontier_edges_ = @@ -31,8 +31,7 @@ MultiGateReorder::MultiGateReorder( // Traverse the DAG to the quantum frontier // to find the UnitID associated with an VertPort static UnitID get_unitid_from_vertex_port( - const std::shared_ptr &frontier, - const VertPort &vert_port) { + const MappingFrontier_ptr &frontier, const VertPort &vert_port) { VertPort current_vert_port = vert_port; while (true) { auto it = @@ -61,8 +60,8 @@ static bool is_multiq_quantum_gate(const Circuit &circ, const Vertex &vert) { } static bool is_physically_permitted( - const std::shared_ptr &frontier, - const ArchitecturePtr &arc_ptr, const Vertex &vert) { + const MappingFrontier_ptr &frontier, const ArchitecturePtr &arc_ptr, + const Vertex &vert) { std::vector nodes; for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); @@ -239,7 +238,7 @@ MultiGateReorderRoutingMethod::MultiGateReorderRoutingMethod( : max_depth_(_max_depth), max_size_(_max_size) {} std::pair MultiGateReorderRoutingMethod::routing_method( - std::shared_ptr &mapping_frontier, + MappingFrontier_ptr &mapping_frontier, const ArchitecturePtr &architecture) const { MultiGateReorder mr(architecture, mapping_frontier); return {mr.solve(this->max_depth_, this->max_size_), {}}; diff --git a/tket/src/Mapping/RoutingMethodCircuit.cpp b/tket/src/Mapping/RoutingMethodCircuit.cpp index 6657dfacad..de6b400455 100644 --- a/tket/src/Mapping/RoutingMethodCircuit.cpp +++ b/tket/src/Mapping/RoutingMethodCircuit.cpp @@ -26,7 +26,7 @@ RoutingMethodCircuit::RoutingMethodCircuit( max_depth_(_max_depth){}; std::pair RoutingMethodCircuit::routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const { // Produce subcircuit and circuit Subcircuit frontier_subcircuit = mapping_frontier->get_frontier_subcircuit( diff --git a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp index dc4a165797..e194d00e81 100644 --- a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp +++ b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp @@ -28,7 +28,7 @@ class BoxDecomposition { */ BoxDecomposition( const ArchitecturePtr& _architecture, - std::shared_ptr& _mapping_frontier); + MappingFrontier_ptr& _mapping_frontier); /** * Decompose any boxes in the next slice after the frontier @@ -40,7 +40,7 @@ class BoxDecomposition { private: // Architecture all new physical operations must respect ArchitecturePtr architecture_; - std::shared_ptr mapping_frontier_; + MappingFrontier_ptr mapping_frontier_; }; class BoxDecompositionRoutingMethod : public RoutingMethod { @@ -58,7 +58,7 @@ class BoxDecompositionRoutingMethod : public RoutingMethod { * */ std::pair routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const override; nlohmann::json serialize() const override; diff --git a/tket/src/Mapping/include/Mapping/LexiLabelling.hpp b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp index 17a50ce31b..9d5268e19b 100644 --- a/tket/src/Mapping/include/Mapping/LexiLabelling.hpp +++ 
b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp @@ -36,7 +36,7 @@ class LexiLabellingMethod : public RoutingMethod { * */ std::pair routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const override; nlohmann::json serialize() const override; diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp index 1871f1293c..d711d10376 100644 --- a/tket/src/Mapping/include/Mapping/LexiRoute.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -43,7 +43,7 @@ class LexiRoute { */ LexiRoute( const ArchitecturePtr& _architecture, - std::shared_ptr& _mapping_frontier); + MappingFrontier_ptr& _mapping_frontier); /** * When called, LexiRoute::solve will modify the Circuit held in @@ -71,18 +71,31 @@ class LexiRoute { bool solve_labelling(); private: + /** Only considers two-qubit vertices if both qubits are labelled to + * Architecture */ + enum class AssignedOnly { Yes, No }; + /** Returns a bool confirming if vertices are valid for LexiRoute::solve */ + enum class CheckRoutingValidity { Yes, No }; + /** Returns a bool confirming if vertices are valid for + * LexiRoute::solve_labelling */ + enum class CheckLabellingValidity { Yes, No }; + /** * this->interacting_uids_ attribute is a map where key is one UnitID * and value is the UnitID it needs to be adjacent to. * This map is implicitly updated whenever a logical SWAP is inserted. * set_interacting_uids determines this map for the first parallel set of * interacting UnitID in the Circuit held in this->mapping_frontier_ - * @param assigned_only If true, only include interactions where both UnitID + * @param assigned_only If Yes, only include interactions where both UnitID * are in this->architecture_. 
+ * @param route_check If Yes, return false if solve not possible + * @param label_check If Yes, return false if solve_labelling not possible + * + * @return bool depending on ENUM conditions */ bool set_interacting_uids( - bool assigned_only = false, bool route_check = false, - bool label_check = false); + AssignedOnly assigned_only, CheckRoutingValidity route_check, + CheckLabellingValidity label_check); /** * If there is some "free" Node in Architecture at distance "distances" on @@ -158,7 +171,7 @@ class LexiRoute { // Architecture all new physical operations must respect ArchitecturePtr architecture_; // Contains circuit for finding SWAP from and non-routed/routed boundary - std::shared_ptr& mapping_frontier_; + MappingFrontier_ptr& mapping_frontier_; // Map between UnitID and UnitID they interact with at boundary unit_map_t interacting_uids_; // Map between original circuit UnitID and new UnitID due to dynamic diff --git a/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp b/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp index c490d4dadf..be13fc51a9 100644 --- a/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp @@ -39,7 +39,7 @@ class LexiRouteRoutingMethod : public RoutingMethod { * */ std::pair routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const override; /** diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp index 5e1e5daee1..d6278fe9ed 100644 --- a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -192,4 +192,6 @@ struct MappingFrontier { const std::vector& uids) const; }; +typedef std::shared_ptr MappingFrontier_ptr; + } // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp index 63434fb383..cb5162f97c 100644 --- a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -28,7 +28,7 @@ class MultiGateReorder { */ MultiGateReorder( const ArchitecturePtr& _architecture, - std::shared_ptr& _mapping_frontier); + MappingFrontier_ptr& _mapping_frontier); /** * Try to commute any multi-qubit gates to the quantum frontier @@ -42,7 +42,7 @@ class MultiGateReorder { private: // Architecture all new physical operations must respect ArchitecturePtr architecture_; - std::shared_ptr mapping_frontier_; + MappingFrontier_ptr mapping_frontier_; EdgeVec u_frontier_edges_; }; @@ -66,7 +66,7 @@ class MultiGateReorderRoutingMethod : public RoutingMethod { * */ std::pair routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const override; nlohmann::json serialize() const override; diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp index de4e440ebf..ac29cbaf37 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -43,7 +43,7 @@ class RoutingMethod { * */ virtual std::pair routing_method( - std::shared_ptr& /*mapping_frontier*/, + MappingFrontier_ptr& /*mapping_frontier*/, const ArchitecturePtr& /*architecture*/) const { return {false, {}}; } diff --git 
a/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp index 3189bb6504..ea5de4455e 100644 --- a/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp +++ b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp @@ -43,7 +43,7 @@ class RoutingMethodCircuit : public RoutingMethod { * */ std::pair routing_method( - std::shared_ptr& mapping_frontier, + MappingFrontier_ptr& mapping_frontier, const ArchitecturePtr& architecture) const; private: diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp index a9459cae8d..00d4b19323 100644 --- a/tket/tests/test_BoxDecompRoutingMethod.cpp +++ b/tket/tests/test_BoxDecompRoutingMethod.cpp @@ -47,8 +47,7 @@ SCENARIO("Decompose boxes") { {qubits[3], nodes[3]}}; circ.rename_units(rename_map); Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); BoxDecomposition bd(shared_arc, mf); bd.solve(); const auto u = tket_sim::get_unitary(circ); @@ -73,8 +72,7 @@ SCENARIO("Decompose boxes") { {qubits[2], nodes[2]}, {qubits[3], nodes[3]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); BoxDecomposition bd(shared_arc, mf); bd.solve(); std::vector commands = mf->circuit_.get_commands(); @@ -105,8 +103,7 @@ SCENARIO("Decompose boxes") { {qubits[2], nodes[2]}, {qubits[3], nodes[3]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); MappingManager mm(shared_arc); std::vector vrm = { diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp index 006cf65ef3..8402f24dc5 100644 --- a/tket/tests/test_LexiRoute.cpp +++ b/tket/tests/test_LexiRoute.cpp @@ -61,8 +61,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, {qubits[3], nodes[3]}, {qubits[4], nodes[6]}, {qubits[5], nodes[5]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); lr.solve(4); @@ -91,8 +90,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[3]}, {qubits[5], nodes[5]}}; circ.rename_units(rename_map); - std::shared_ptr mf0 = - std::make_shared(circ); + MappingFrontier_ptr mf0 = std::make_shared(circ); LexiRoute lr(shared_arc, mf0); lr.solve_labelling(); @@ -101,8 +99,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { rename_map = {{qubits[4], nodes[6]}}; mf0->circuit_.rename_units(rename_map); - std::shared_ptr mf1 = - std::make_shared(circ); + MappingFrontier_ptr mf1 = std::make_shared(circ); LexiRoute lr1(shared_arc, mf1); lr1.solve(4); @@ -130,8 +127,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); lr.solve(4); @@ -154,8 +150,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { circ.add_op(OpType::CX, {qubits[0], qubits[3]}); circ.add_op(OpType::CX, {qubits[3], qubits[4]}); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); 
LexiRoute lr0(shared_arc, mf); lr0.solve_labelling(); std::vector commands = mf->circuit_.get_commands(); @@ -195,8 +190,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[7]}, {qubits[4], nodes[2]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr(shared_arc, mf); @@ -219,8 +213,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[7]}, {qubits[4], nodes[2]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr(shared_arc, mf); @@ -240,8 +233,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[7]}, {qubits[4], nodes[2]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr(shared_arc, mf); @@ -261,8 +253,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[7]}, {qubits[4], nodes[2]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr(shared_arc, mf); @@ -293,8 +284,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[0], nodes[2]}, {qubits[1], nodes[4]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr0(shared_arc, mf); lr0.solve(20); @@ -334,8 +324,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[0], nodes[2]}, {qubits[1], nodes[4]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); LexiRoute lr0(shared_arc, mf); lr0.solve_labelling(); @@ -370,8 +359,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[0], nodes[2]}, {qubits[1], nodes[4]}, {qubits[3], nodes[3]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->ancilla_nodes_.insert(nodes[3]); mf->advance_frontier_boundary(shared_arc); @@ -401,8 +389,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, {qubits[3], nodes[3]}, {qubits[4], nodes[6]}, {qubits[5], nodes[5]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); lr.solve(4); @@ -429,15 +416,14 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } GIVEN( "Labelling is required, but there are no free remaining qubits, for " - " one updated label, order 1.") { + "one updated label, order 1.") { Circuit circ(9); std::vector qubits = 
circ.all_qubits(); circ.add_op(OpType::CX, {qubits[1], qubits[8]}); @@ -451,8 +437,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } @@ -472,8 +457,7 @@ SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRoute lr(shared_arc, mf); REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); } @@ -493,8 +477,7 @@ SCENARIO("Test LexiLabellingMethod") { ArchitecturePtr shared_arc = std::make_shared(architecture); GIVEN("No qubit to label, empty frontier, routing_method false.") { Circuit circ(5); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; REQUIRE(!lrm.routing_method(mf, shared_arc).first); } @@ -511,8 +494,7 @@ SCENARIO("Test LexiLabellingMethod") { {qubits[3], nodes[3]}, {qubits[4], nodes[4]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; REQUIRE(!lrm.routing_method(mf, shared_arc).first); } @@ -528,8 +510,7 @@ SCENARIO("Test LexiLabellingMethod") { {qubits[2], nodes[2]}, {qubits[4], nodes[4]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; REQUIRE(!lrm.routing_method(mf, shared_arc).first); } @@ -545,8 +526,7 @@ SCENARIO("Test LexiLabellingMethod") { std::map rename_map = { {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; REQUIRE(!lrm.routing_method(mf, shared_arc).first); } @@ -562,8 +542,7 @@ SCENARIO("Test LexiLabellingMethod") { {qubits[3], nodes[3]}, {qubits[4], nodes[4]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiLabellingMethod lrm; REQUIRE(!lrm.routing_method(mf, shared_arc).first); } @@ -575,8 +554,7 @@ SCENARIO("Test LexiLabellingMethod") { std::map rename_map = { {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); VertPort pre_label = mf->quantum_boundary->get().find(qubits[3])->second; LexiLabellingMethod lrm; @@ -601,8 +579,7 @@ SCENARIO("Test LexiLabellingMethod") { std::map rename_map = { {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[3], nodes[3]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); VertPort pre_label = mf->quantum_boundary->get().find(qubits[2])->second; LexiLabellingMethod lrm; @@ -625,8 +602,7 @@ SCENARIO("Test LexiLabellingMethod") { std::map rename_map = { {qubits[2], nodes[2]}, {qubits[1], nodes[1]}}; circ.rename_units(rename_map); - std::shared_ptr 
mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); VertPort pre_label_0 = mf->quantum_boundary->get().find(qubits[0])->second; VertPort pre_label_3 = @@ -657,8 +633,7 @@ SCENARIO("Test LexiLabellingMethod") { std::map rename_map = { {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); VertPort pre_label_0 = mf->quantum_boundary->get().find(qubits[2])->second; VertPort pre_label_3 = @@ -691,8 +666,7 @@ SCENARIO("Test LexiLabellingMethod") { std::map rename_map = { {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); VertPort pre_label_0 = mf->quantum_boundary->get().find(qubits[2])->second; VertPort pre_label_3 = @@ -761,8 +735,7 @@ SCENARIO("Test LexiRouteRoutingMethod") { {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRouteRoutingMethod lrrm(100); std::pair bool_init_map = lrrm.routing_method(mf, shared_arc); @@ -803,8 +776,7 @@ SCENARIO("Test LexiRouteRoutingMethod") { {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); LexiRouteRoutingMethod lrrm(100); std::pair bool_init_map = lrrm.routing_method(mf, shared_arc); @@ -866,8 +838,7 @@ SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(architecture, false); MappingManager mm(shared_arc); - std::shared_ptr mf = - std::make_shared(copy_circ); + MappingFrontier_ptr mf = std::make_shared(copy_circ); LexiLabellingMethod lrm; std::vector vrm = { diff --git a/tket/tests/test_MappingManager.cpp b/tket/tests/test_MappingManager.cpp index 17edafbead..10e74a0f43 100644 --- a/tket/tests/test_MappingManager.cpp +++ b/tket/tests/test_MappingManager.cpp @@ -33,7 +33,7 @@ class TokenSwappingTester : public RoutingMethod { * */ std::pair routing_method( - std::shared_ptr& /*mapping_frontier*/, + MappingFrontier_ptr& /*mapping_frontier*/, const ArchitecturePtr& /*architecture*/) const { Node node0("test_node", 0), node1("test_node", 1), node2("test_node", 2); return {true, {{node0, node1}, {node1, node2}, {node2, node0}}}; diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp index 61fb5c9a86..276ee7f970 100644 --- a/tket/tests/test_MultiGateReorder.cpp +++ b/tket/tests/test_MultiGateReorder.cpp @@ -46,8 +46,7 @@ SCENARIO("Reorder circuits") { {qubits[3], nodes[3]}}; circ.rename_units(rename_map); Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); MultiGateReorder mr(shared_arc, mf); mr.solve(20, 20); @@ -88,8 +87,7 @@ SCENARIO("Reorder circuits") { {qubits[3], nodes[3]}}; circ.rename_units(rename_map); Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); MultiGateReorder mr(shared_arc, mf); mr.solve(20, 20); @@ -135,8 +133,7 @@ SCENARIO("Reorder circuits") { circ.rename_units(rename_map); Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); + 
MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); MultiGateReorder mr(shared_arc, mf); mr.solve(20, 20); @@ -183,8 +180,7 @@ SCENARIO("Reorder circuits") { circ.rename_units(rename_map); Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); MultiGateReorder mr(shared_arc, mf); mr.solve(20, 20); @@ -229,8 +225,7 @@ SCENARIO("Reorder circuits with limited search space") { {qubits[3], nodes[3]}}; circ.rename_units(rename_map); Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); MultiGateReorder mr(shared_arc, mf); mr.solve(3, 3); @@ -278,8 +273,7 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { {qubits[3], nodes[3]}}; circ.rename_units(rename_map); Circuit circ_copy(circ); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); mf->advance_frontier_boundary(shared_arc); MultiGateReorderRoutingMethod mrrm; @@ -304,8 +298,7 @@ SCENARIO("Test MultiGateReorderRoutingMethod") { // Test with limits Circuit circ2(circ_copy); - std::shared_ptr mf2 = - std::make_shared(circ2); + MappingFrontier_ptr mf2 = std::make_shared(circ2); mf2->advance_frontier_boundary(shared_arc); MultiGateReorderRoutingMethod mrrm2(4, 4); @@ -360,8 +353,7 @@ SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { {qubits[2], nodes[2]}, {qubits[3], nodes[3]}}; circ.rename_units(rename_map); - std::shared_ptr mf = - std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); MappingManager mm(shared_arc); // MultiGateReorderRoutingMethod should first commute the last two gates // then only one swap is needed. 
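[Editor's note] For reference, the construction pattern these test diffs converge on after the refactor can be sketched as follows. This is a minimal illustration only, assuming the Mapping headers introduced in this patch and an already-built ArchitecturePtr; the helper name route_example is hypothetical and not part of the patch.

#include "Mapping/LexiRoute.hpp"
#include "Mapping/MappingFrontier.hpp"

using namespace tket;

// Illustrative sketch; route_example is a hypothetical helper, not from the patch.
void route_example(Circuit& circ, const ArchitecturePtr& shared_arc) {
  // MappingFrontier_ptr is the new alias for std::shared_ptr<MappingFrontier>,
  // so construction goes through std::make_shared as in the tests above.
  MappingFrontier_ptr mf = std::make_shared<MappingFrontier>(circ);
  // Advance the frontier past gates that already respect the architecture.
  mf->advance_frontier_boundary(shared_arc);
  LexiRoute lr(shared_arc, mf);
  lr.solve(4);  // lookahead parameter, matching the value used in the tests
}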
diff --git a/tket/tests/test_RoutingMethod.cpp b/tket/tests/test_RoutingMethod.cpp index 6c95b0fe05..5c47a988c2 100644 --- a/tket/tests/test_RoutingMethod.cpp +++ b/tket/tests/test_RoutingMethod.cpp @@ -14,7 +14,7 @@ SCENARIO("Test RoutingMethod default methods.") { {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); Circuit circ(3); - std::shared_ptr mf = std::make_shared(circ); + MappingFrontier_ptr mf = std::make_shared(circ); unit_map_t empty; std::pair rm_return = rm.routing_method(mf, shared_arc); REQUIRE(!rm_return.first); @@ -103,9 +103,8 @@ SCENARIO("Test RoutingMethodCircuit checking criteria") { c.add_op(OpType::CX, {0, 1}); circ3.add_op(OpType::CX, {0, 2}); circ3.add_op(OpType::CX, {2, 1}); - std::shared_ptr mf2 = std::make_shared(c); - std::shared_ptr mf3 = - std::make_shared(circ3); + MappingFrontier_ptr mf2 = std::make_shared(c); + MappingFrontier_ptr mf3 = std::make_shared(circ3); Architecture arc( {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); @@ -139,7 +138,7 @@ SCENARIO("Test RoutingMethodCircuit::routing_method") { c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); - std::shared_ptr mf = std::make_shared(c); + MappingFrontier_ptr mf = std::make_shared(c); Architecture arc( {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); @@ -157,7 +156,7 @@ SCENARIO("Test RoutingMethodCircuit::routing_method") { c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); - std::shared_ptr mf = std::make_shared(c); + MappingFrontier_ptr mf = std::make_shared(c); Architecture arc( {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); @@ -175,7 +174,7 @@ SCENARIO("Test RoutingMethodCircuit::routing_method") { c.add_op(OpType::CX, {0, 1}); c.add_op(OpType::CX, {0, 1}); - std::shared_ptr mf = std::make_shared(c); + MappingFrontier_ptr mf = std::make_shared(c); Architecture arc( {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); ArchitecturePtr shared_arc = std::make_shared(arc); diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp index 2986fb182d..b3973a6b22 100644 --- a/tket/tests/test_RoutingPasses.cpp +++ b/tket/tests/test_RoutingPasses.cpp @@ -436,37 +436,6 @@ SCENARIO( Transforms::decompose_BRIDGE_to_CX().apply(circ); REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); } - // GIVEN( - // "A large circuit, with a mixture of conditional CX and CX gates with " - // "multiple classical wires, non conditional CX and, single qubit " - // "gates, and a directed architecture.") { - // SquareGrid arc(10, 4, 2); - // Circuit circ(60, 10); - // for (unsigned i = 0; i < 58; i++) { - // circ.add_op(OpType::CX, {i, i + 1}); - // circ.add_conditional_gate( - // OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); - // circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); - // circ.add_conditional_gate( - // OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); - // circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, - // 1); circ.add_conditional_gate( - // OpType::CX, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); - // circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); - // } - // MappingManager mm(std::make_shared(arc)); - // REQUIRE(mm.route_circuit( - // circ, {std::make_shared(), - // std::make_shared()})); - - // std::cout << "route "<< std::endl; - // Transforms::decompose_SWAP_to_CX().apply(circ); - // 
REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - // Transforms::decompose_BRIDGE_to_CX().apply(circ); - // REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - // Transforms::decompose_CX_directed(arc).apply(circ); - // REQUIRE(respects_connectivity_constraints(circ, arc, true, true)); - // } } SCENARIO( diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index 2ad677c57d..beab6979e5 100644 --- a/tket/tests/test_json.cpp +++ b/tket/tests/test_json.cpp @@ -433,8 +433,7 @@ SCENARIO("Test RoutingMethod serializations") { c.add_op(OpType::CX, {0, 1}); MappingFrontier mf(c); - std::shared_ptr mf_sp = - std::make_shared(mf); + MappingFrontier_ptr mf_sp = std::make_shared(mf); CHECK(!loaded_rm_j.routing_method(mf_sp, std::make_shared(2, 2)) .first); From cd9999606515398b7d4cc1022da00ed5af2a0b5c Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Mon, 28 Feb 2022 15:19:57 +0000 Subject: [PATCH 144/146] Update pytket/docs/changelog.rst Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> --- pytket/docs/changelog.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/docs/changelog.rst b/pytket/docs/changelog.rst index 9e7b133dba..3fcc18c9f7 100644 --- a/pytket/docs/changelog.rst +++ b/pytket/docs/changelog.rst @@ -35,7 +35,7 @@ Minor new features: * New ``pytket.passes.NaivePlacementPass`` which completes a basic relabelling of all Circuit Qubit not labelled as some Architecture Node to any available Architecture Node * Add ``opgroups`` property to ``Circuit``. -* ``Architecture`` has new ``valid_operations`` method which returns true if passed UnitID respect +* ``Architecture`` has new ``valid_operation`` method which returns true if passed UnitIDs that respect architecture constraints. * New methods for mapping logical to physical circuits for some ``Architecture``: ``LexiRouteRoutingMethod`` ``LexiLabellingMethod``, ``MultiGateReorderRoutingMethod``. From 2a068ab370755c5a8a12d2c896da12c4352de2b9 Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Mon, 28 Feb 2022 15:20:01 +0000 Subject: [PATCH 145/146] Update pytket/binders/architecture.cpp Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> --- pytket/binders/architecture.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp index 891a79d6a6..e587c3b455 100644 --- a/pytket/binders/architecture.cpp +++ b/pytket/binders/architecture.cpp @@ -63,7 +63,7 @@ PYBIND11_MODULE(architecture, m) { "valid_operation", &Architecture::valid_operation, "Returns true if the given operation acting on the given ", "nodes can be executed on the Architecture connectivity graph." 
- "\n\n:param uids: UnitID validity is being checked for", + "\n\n:param uids: list of UnitIDs validity is being checked for", py::arg("uids")) .def( "get_adjacent_nodes", &Architecture::get_neighbour_nodes, From 22dda0bcebbcd0d10446ecc5e05a9f1422a4f2aa Mon Sep 17 00:00:00 2001 From: Silas Dilkes <36165522+sjdilkes@users.noreply.github.com> Date: Mon, 28 Feb 2022 15:20:18 +0000 Subject: [PATCH 146/146] Update pytket/docs/changelog.rst Co-authored-by: Alec Edgington <54802828+cqc-alec@users.noreply.github.com> --- pytket/docs/changelog.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytket/docs/changelog.rst b/pytket/docs/changelog.rst index 3fcc18c9f7..0375a545ac 100644 --- a/pytket/docs/changelog.rst +++ b/pytket/docs/changelog.rst @@ -37,7 +37,7 @@ Minor new features: * Add ``opgroups`` property to ``Circuit``. * ``Architecture`` has new ``valid_operation`` method which returns true if passed UnitIDs that respect architecture constraints. -* New methods for mapping logical to physical circuits for some ``Architecture``: ``LexiRouteRoutingMethod`` +* New methods for mapping logical to physical circuits for some ``Architecture``: ``LexiRouteRoutingMethod``, ``LexiLabellingMethod``, ``MultiGateReorderRoutingMethod``. 0.19.2 (February 2022)