diff --git a/.github/disabled-workflows/build_and_test_v2.yml b/.github/disabled-workflows/build_and_test_v2.yml new file mode 100644 index 0000000000..76204e794e --- /dev/null +++ b/.github/disabled-workflows/build_and_test_v2.yml @@ -0,0 +1,436 @@ +name: Build and test v2 + +on: + pull_request: + branches: + - develop2 + push: + branches: + - develop2 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + linux: + name: Build and test (Linux) + runs-on: ubuntu-20.04 + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Get current time + uses: srfrnk/current-time@v1.1.0 + id: current_time + with: + format: YYYYMMDDHHmmss + - name: Cache ccache data + uses: actions/cache@v2 + with: + path: ~/.ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: apt update + run: sudo apt update + - name: Check doxygen + if: github.event_name == 'pull_request' + run: | + sudo apt install -y doxygen graphviz + cd tket && doxygen + - name: Install conan + id: conan + run: | + pip install conan + conan_cmd=/home/runner/.local/bin/conan + ${conan_cmd} profile new tket --detect + ${conan_cmd} profile update settings.compiler.libcxx=libstdc++11 tket + ${conan_cmd} profile update options.tket:shared=True tket + echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: ${CONAN_CMD} profile update options.tket-tests:full=True tket + - name: add remote + run: ${CONAN_CMD} remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: Install ninja and ccache + run: sudo apt-get install ninja-build ccache + - name: Build tket + run: ${CONAN_CMD} create --profile=tket recipes/tket + - name: Install runtime test requirements + run: | + sudo apt-get install texlive texlive-latex-extra latexmk + mkdir -p ~/texmf/tex/latex + wget http://mirrors.ctan.org/graphics/pgf/contrib/quantikz/tikzlibraryquantikz.code.tex -P ~/texmf/tex/latex + - name: Build and run tket tests + run: ${CONAN_CMD} create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: ${CONAN_CMD} create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: ${CONAN_CMD} create --profile=tket recipes/pybind11 + - name: Set up Python 3.8 + if: github.event_name == 'push' + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: Build pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.9 + if: github.event_name == 'pull_request' + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Build pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket + pip install -e . 
-v + - name: Test building docs + if: github.event_name == 'pull_request' + timeout-minutes: 20 + run: | + pip install -r pytket/docs/requirements.txt + ./.github/workflows/build-docs + - name: Test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.10 + if: github.event_name == 'schedule' + uses: actions/setup-python@v2 + with: + python-version: '3.10' + - name: Build pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + + macos: + name: Build and test (MacOS) + runs-on: macos-11 + env: + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Check C++ code formatting + if: github.event_name == 'pull_request' + run: | + brew update + brew install clang-format@13 + git ls-files "*.cpp" "*.hpp" | xargs clang-format -style=file --dry-run --Werror + - name: Get current time + uses: srfrnk/current-time@v1.1.0 + id: current_time + with: + format: YYYYMMDDHHmmss + - name: Cache ccache data + uses: actions/cache@v2 + with: + path: ~/Library/Caches/ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: Cache conan data + uses: actions/cache@v2 + with: + path: ~/.conan + key: ${{ runner.os }}-tket-conan-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-conan- + - name: Install ninja and ccache + run: brew install ninja ccache + - name: Set up Python 3.9 + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Install conan + id: conan + run: | + pip install conan + conan profile new tket --detect --force + conan profile update options.tket:shared=True tket + export CC=`which conan` + echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force + - name: Install boost + run: conan install --profile=tket boost/1.78.0@ --build=missing + - name: Build tket + run: conan create --profile=tket recipes/tket --build=spdlog --build=tket + - name: Build and run tket tests + run: conan create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: | + conan install --profile=tket rapidcheck/cci.20210702@ --build=missing + conan create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: conan create --profile=tket recipes/pybind11 + - name: Set up Python 3.8 + if: github.event_name == 'push' + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: Build pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket + pip install -e . 
-v + - name: Test pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.9 + if: github.event_name == 'pull_request' + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Build pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Run mypy + if: github.event_name == 'pull_request' + run: | + pip install -U mypy + cd pytket + mypy --config-file=mypy.ini --no-incremental -p pytket -p tests + - name: Set up Python 3.10 + if: github.event_name == 'schedule' + uses: actions/setup-python@v2 + with: + python-version: '3.10' + - name: Build pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + + macos-m1: + name: Build and test (MacOS M1) + runs-on: [self-hosted, macos, M1] + if: github.event_name == 'push' || github.event_name == 'schedule' || github.event.pull_request.head.repo.full_name == github.repository + defaults: + run: + shell: "/usr/bin/arch -arch arm64e /bin/bash {0}" + env: + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Set up conan + id: conan + run: | + conan profile new tket --detect --force + conan profile update options.tket:shared=True tket + export CC=`which conan` + echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force + - name: Install boost + run: conan install --profile=tket boost/1.78.0@ --build=missing + - name: Build tket + run: conan create --profile=tket recipes/tket --build=spdlog --build=tket + - name: Build and run tket tests + run: conan create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: conan create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: conan create --profile=tket recipes/pybind11 + - name: Build pytket (3.8) + if: github.event_name == 'push' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.8 + OPENBLAS="$(brew --prefix openblas)" pip install -U scipy + cd pytket + pip install -e . -v + - name: Test pytket (3.8) + if: github.event_name == 'push' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.8 + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Build pytket (3.9) + if: github.event_name == 'pull_request' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.9 + OPENBLAS="$(brew --prefix openblas)" pip install -U scipy + cd pytket + pip install -e . 
-v + - name: Test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.9 + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Build pytket (3.10) + if: github.event_name == 'schedule' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.10 + OPENBLAS="$(brew --prefix openblas)" pip install -U scipy + cd pytket + pip install -e . -v + - name: Test pytket (3.10) + if: github.event_name == 'schedule' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.10 + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + + windows: + name: Build and test (Windows) + runs-on: windows-2019 + env: + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + - name: Hash tket source + id: hash_tket_source + run: | + Function Get-FolderHash + { + param ($folder) + $files = dir $folder -Recurse |? { -not $_.psiscontainer } + $allBytes = new-object System.Collections.Generic.List[byte] + foreach ($file in $files) + { + $allBytes.AddRange([System.IO.File]::ReadAllBytes($file.FullName)) + $allBytes.AddRange([System.Text.Encoding]::UTF8.GetBytes($file.Name)) + } + $hasher = [System.Security.Cryptography.MD5]::Create() + $ret = [string]::Join("",$($hasher.ComputeHash($allBytes.ToArray()) | %{"{0:x2}" -f $_})) + return $ret + } + $tket_hash = Get-FolderHash tket + echo "::set-output name=tket_hash::${tket_hash}" + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Install conan + run: | + pip install conan + conan profile new tket --detect + conan profile update options.tket:shared=True tket + $conan_cmd = (gcm conan).Path + echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: Cache tket build + id: cache-tket + uses: actions/cache@v2 + with: + path: C:\Users\runneradmin\.conan\data\tket + key: ${{ runner.os }}-tket-tket-${{ steps.hash_tket_source.outputs.tket_hash }}-v2-1 + - name: Build tket + if: steps.cache-tket.outputs.cache-hit != 'true' + run: conan create --profile=tket recipes/tket + - name: Build and run tket tests + run: conan create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: conan create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: conan create --profile=tket recipes/pybind11 + - name: Set up Python 3.8 + if: github.event_name == 'push' + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: Build and test pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket + pip install -e . -v + cd tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.9 + if: github.event_name == 'pull_request' + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Build and test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket + pip install -e . 
-v + cd tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.10 + if: github.event_name == 'schedule' + uses: actions/setup-python@v2 + with: + python-version: '3.10' + - name: Build and test pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket + pip install -e . -v + cd tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules diff --git a/.github/disabled-workflows/build_and_test_v2_nightly.yml b/.github/disabled-workflows/build_and_test_v2_nightly.yml new file mode 100644 index 0000000000..81da829f77 --- /dev/null +++ b/.github/disabled-workflows/build_and_test_v2_nightly.yml @@ -0,0 +1,437 @@ +name: Build and test v2 nightly + +on: + schedule: + # 03:00 every Saturday morning + - cron: '0 3 * * 6' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + linux: + name: Build and test (Linux) + runs-on: ubuntu-20.04 + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + ref: develop2 + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Get current time + uses: srfrnk/current-time@v1.1.0 + id: current_time + with: + format: YYYYMMDDHHmmss + - name: Cache ccache data + uses: actions/cache@v2 + with: + path: ~/.ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: apt update + run: sudo apt update + - name: Check doxygen + if: github.event_name == 'pull_request' + run: | + sudo apt install -y doxygen graphviz + cd tket && doxygen + - name: Install conan + id: conan + run: | + pip install conan + conan_cmd=/home/runner/.local/bin/conan + ${conan_cmd} profile new tket --detect + ${conan_cmd} profile update settings.compiler.libcxx=libstdc++11 tket + ${conan_cmd} profile update options.tket:shared=True tket + echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: ${CONAN_CMD} profile update options.tket-tests:full=True tket + - name: add remote + run: ${CONAN_CMD} remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: Install ninja and ccache + run: sudo apt-get install ninja-build ccache + - name: Build tket + run: ${CONAN_CMD} create --profile=tket recipes/tket + - name: Install runtime test requirements + run: | + sudo apt-get install texlive texlive-latex-extra latexmk + mkdir -p ~/texmf/tex/latex + wget http://mirrors.ctan.org/graphics/pgf/contrib/quantikz/tikzlibraryquantikz.code.tex -P ~/texmf/tex/latex + - name: Build and run tket tests + run: ${CONAN_CMD} create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: ${CONAN_CMD} create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: ${CONAN_CMD} create --profile=tket recipes/pybind11 + - name: Set up Python 3.8 + if: github.event_name == 'push' + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: Build pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket + pip install -e . 
-v + - name: Test pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.9 + if: github.event_name == 'pull_request' + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Build pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket + pip install -e . -v + - name: Test building docs + if: github.event_name == 'pull_request' + timeout-minutes: 20 + run: | + pip install -r pytket/docs/requirements.txt + ./.github/workflows/build-docs + - name: Test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.10 + if: github.event_name == 'schedule' + uses: actions/setup-python@v2 + with: + python-version: '3.10' + - name: Build pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + + macos: + name: Build and test (MacOS) + runs-on: macos-11 + env: + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + ref: develop2 + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Check C++ code formatting + if: github.event_name == 'pull_request' + run: | + brew update + brew install clang-format@13 + git ls-files "*.cpp" "*.hpp" | xargs clang-format -style=file --dry-run --Werror + - name: Get current time + uses: srfrnk/current-time@v1.1.0 + id: current_time + with: + format: YYYYMMDDHHmmss + - name: Cache ccache data + uses: actions/cache@v2 + with: + path: ~/Library/Caches/ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: Cache conan data + uses: actions/cache@v2 + with: + path: ~/.conan + key: ${{ runner.os }}-tket-conan-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-conan- + - name: Install ninja and ccache + run: brew install ninja ccache + - name: Set up Python 3.9 + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Install conan + id: conan + run: | + pip install conan + conan profile new tket --detect --force + conan profile update options.tket:shared=True tket + export CC=`which conan` + echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force + - name: Install boost + run: conan install --profile=tket boost/1.78.0@ --build=missing + - name: Build tket + run: conan create --profile=tket recipes/tket --build=spdlog --build=tket + - name: Build and run tket tests + run: conan create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: | + conan install --profile=tket rapidcheck/cci.20210702@ --build=missing + conan create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: conan create --profile=tket recipes/pybind11 + - name: Set up Python 3.8 + if: github.event_name == 'push' + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: 
Build pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.9 + if: github.event_name == 'pull_request' + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Build pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Run mypy + if: github.event_name == 'pull_request' + run: | + pip install -U mypy + cd pytket + mypy --config-file=mypy.ini --no-incremental -p pytket -p tests + - name: Set up Python 3.10 + if: github.event_name == 'schedule' + uses: actions/setup-python@v2 + with: + python-version: '3.10' + - name: Build pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket + pip install -e . -v + - name: Test pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + + macos-m1: + name: Build and test (MacOS M1) + runs-on: [self-hosted, macos, M1] + if: github.event_name == 'push' || github.event_name == 'schedule' || github.event.pull_request.head.repo.full_name == github.repository + defaults: + run: + shell: "/usr/bin/arch -arch arm64e /bin/bash {0}" + env: + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + ref: develop2 + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Set up conan + id: conan + run: | + conan profile new tket --detect --force + conan profile update options.tket:shared=True tket + export CC=`which conan` + echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force + - name: Install boost + run: conan install --profile=tket boost/1.78.0@ --build=missing + - name: Build tket + run: conan create --profile=tket recipes/tket --build=spdlog --build=tket + - name: Build and run tket tests + run: conan create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: conan create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: conan create --profile=tket recipes/pybind11 + - name: Build pytket (3.8) + if: github.event_name == 'push' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.8 + OPENBLAS="$(brew --prefix openblas)" pip install -U scipy + cd pytket + pip install -e . -v + - name: Test pytket (3.8) + if: github.event_name == 'push' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.8 + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Build pytket (3.9) + if: github.event_name == 'pull_request' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.9 + OPENBLAS="$(brew --prefix openblas)" pip install -U scipy + cd pytket + pip install -e . 
-v + - name: Test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.9 + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Build pytket (3.10) + if: github.event_name == 'schedule' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.10 + OPENBLAS="$(brew --prefix openblas)" pip install -U scipy + cd pytket + pip install -e . -v + - name: Test pytket (3.10) + if: github.event_name == 'schedule' + run: | + eval "$(pyenv init -)" + pyenv shell tket-3.10 + cd pytket/tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + + windows: + name: Build and test (Windows) + runs-on: windows-2019 + env: + CONAN_REVISIONS_ENABLED: 1 + PYTKET_SKIP_REGISTRATION: "true" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + ref: develop2 + - name: Hash tket source + id: hash_tket_source + run: | + Function Get-FolderHash + { + param ($folder) + $files = dir $folder -Recurse |? { -not $_.psiscontainer } + $allBytes = new-object System.Collections.Generic.List[byte] + foreach ($file in $files) + { + $allBytes.AddRange([System.IO.File]::ReadAllBytes($file.FullName)) + $allBytes.AddRange([System.Text.Encoding]::UTF8.GetBytes($file.Name)) + } + $hasher = [System.Security.Cryptography.MD5]::Create() + $ret = [string]::Join("",$($hasher.ComputeHash($allBytes.ToArray()) | %{"{0:x2}" -f $_})) + return $ret + } + $tket_hash = Get-FolderHash tket + echo "::set-output name=tket_hash::${tket_hash}" + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Install conan + run: | + pip install conan + conan profile new tket --detect + conan profile update options.tket:shared=True tket + $conan_cmd = (gcm conan).Path + echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: Cache tket build + id: cache-tket + uses: actions/cache@v2 + with: + path: C:\Users\runneradmin\.conan\data\tket + key: ${{ runner.os }}-tket-tket-${{ steps.hash_tket_source.outputs.tket_hash }}-v2-1 + - name: Build tket + if: steps.cache-tket.outputs.cache-hit != 'true' + run: conan create --profile=tket recipes/tket + - name: Build and run tket tests + run: conan create --profile=tket recipes/tket-tests + - name: Build and run tket proptests + run: conan create --profile=tket recipes/tket-proptests + - name: Install pybind11 + run: conan create --profile=tket recipes/pybind11 + - name: Set up Python 3.8 + if: github.event_name == 'push' + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: Build and test pytket (3.8) + if: github.event_name == 'push' + run: | + cd pytket + pip install -e . -v + cd tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.9 + if: github.event_name == 'pull_request' + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: Build and test pytket (3.9) + if: github.event_name == 'pull_request' + run: | + cd pytket + pip install -e . 
-v + cd tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules + - name: Set up Python 3.10 + if: github.event_name == 'schedule' + uses: actions/setup-python@v2 + with: + python-version: '3.10' + - name: Build and test pytket (3.10) + if: github.event_name == 'schedule' + run: | + cd pytket + pip install -e . -v + cd tests + pip install -r requirements.txt + pytest --ignore=simulator/ --doctest-modules diff --git a/.github/disabled-workflows/coverage_v2.yml b/.github/disabled-workflows/coverage_v2.yml new file mode 100644 index 0000000000..708cdd9959 --- /dev/null +++ b/.github/disabled-workflows/coverage_v2.yml @@ -0,0 +1,147 @@ +name: Analyse tket C++ test coverage v2 + +on: + pull_request: + branches: + - develop2 + push: + branches: + - develop2 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + changes: + runs-on: ubuntu-20.04 + outputs: + tket: ${{ steps.filter.outputs.tket }} + steps: + - uses: actions/checkout@v2 + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ github.ref }} + filters: | + tket: + - 'tket/**' + + generate_coverage: + name: Generate coverage report + needs: changes + if: needs.changes.outputs.tket == 'true' + runs-on: ubuntu-20.04 + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Get current time + uses: srfrnk/current-time@v1.1.0 + id: current_time + with: + format: YYYYMMDDHHmmss + - name: Cache ccache data + uses: actions/cache@v2 + with: + path: ~/.ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: Install conan + id: conan + run: | + pip install conan + conan_cmd=/home/runner/.local/bin/conan + ${conan_cmd} profile new tket --detect + ${conan_cmd} profile update settings.compiler.libcxx=libstdc++11 tket + ${conan_cmd} profile update options.tket:shared=True tket + ${conan_cmd} profile update settings.build_type=Debug tket + echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: add remote + run: ${CONAN_CMD} remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: Install ninja and ccache + run: | + sudo apt-get update + sudo apt-get install ninja-build ccache + - name: Build tket + run: | + ${CONAN_CMD} install recipes/tket --install-folder=build/tket --profile=tket -o tket:profile_coverage=True + ${CONAN_CMD} build recipes/tket --configure --build-folder=build/tket --source-folder=tket/src + ${CONAN_CMD} build recipes/tket --build --build-folder=build/tket + ${CONAN_CMD} export-pkg recipes/tket -f --build-folder=build/tket --source-folder=tket/src + - name: Build tket tests + run: | + ${CONAN_CMD} install recipes/tket-tests --install-folder=build/tket-tests --profile=tket -o tket-tests:with_coverage=True + ${CONAN_CMD} build recipes/tket-tests --configure --build-folder=build/tket-tests --source-folder=tket/tests + ${CONAN_CMD} build recipes/tket-tests --build --build-folder=build/tket-tests + - name: Install runtime test requirements + run: | + sudo apt-get install texlive texlive-latex-extra latexmk + mkdir -p ~/texmf/tex/latex + wget http://mirrors.ctan.org/graphics/pgf/contrib/quantikz/tikzlibraryquantikz.code.tex -P ~/texmf/tex/latex + - name: Run tket tests + run: ./build/tket-tests/bin/test_tket + - name: Install gcovr + run: pip install gcovr + - name: Build coverage report 
+ run: | + mkdir test-coverage + gcovr --print-summary --html --html-details -r ./tket/src/ --exclude-lines-by-pattern '.*\bTKET_ASSERT\(.*\);' --object-directory ${PWD}/build/tket/ -o test-coverage/index.html > test-coverage/summary.txt + - name: Upload artefact + uses: actions/upload-artifact@v2 + with: + name: test_coverage + path: test-coverage/ + + publish_coverage: + name: Publish coverage + needs: generate_coverage + concurrency: gh_pages + if: github.event_name == 'push' && needs.changes.outputs.tket == 'true' + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + ref: gh-pages + - name: Download artefact + uses: actions/download-artifact@v2 + with: + name: test_coverage + path: test-coverage/ + - name: Configure git + run: | + git config --global user.email "tket-bot@cambridgequantum.com" + git config --global user.name "«$GITHUB_WORKFLOW» github action" + - name: Remove old report + run: git rm -r docs/tket-v2/test-coverage + - name: Add report to repository + run: | + mv test-coverage/ docs/tket-v2/ + git add -f docs/tket-v2/test-coverage + git commit --allow-empty -m "Add generated coverage report." + - name: Publish report + run: git push origin gh-pages:gh-pages + + check_coverage: + name: Check coverage + needs: generate_coverage + if: github.event_name == 'pull_request' && needs.changes.outputs.tket == 'true' + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + - name: Download artefact + uses: actions/download-artifact@v2 + with: + name: test_coverage + path: test-coverage/ + - name: Compare with latest report from develop + run: | + wget https://cqcl.github.io/tket/tket-v2/test-coverage/summary.txt + ./.github/workflows/compare-coverage summary.txt test-coverage/summary.txt diff --git a/.github/disabled-workflows/docs_v2.yml b/.github/disabled-workflows/docs_v2.yml new file mode 100644 index 0000000000..108e861a42 --- /dev/null +++ b/.github/disabled-workflows/docs_v2.yml @@ -0,0 +1,26 @@ +name: Build tket C++ documentation v2 + +on: + push: + branches: + - develop2 + paths: + - 'tket/src/**' + +jobs: + build_docs: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* + - name: Install Doxygen + run: sudo apt update && sudo apt install -y doxygen graphviz + - name: Build Doxygen docs + run: cd tket && doxygen + - name: Upload artefact + uses: actions/upload-artifact@v2 + with: + name: tket_docs + path: tket/doc/html/ \ No newline at end of file diff --git a/.github/disabled-workflows/valgrind_v2.yml b/.github/disabled-workflows/valgrind_v2.yml new file mode 100644 index 0000000000..f49684f0f5 --- /dev/null +++ b/.github/disabled-workflows/valgrind_v2.yml @@ -0,0 +1,76 @@ +name: valgrind check v2 +on: + pull_request: + branches: + - develop2 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true +jobs: + changes: + runs-on: ubuntu-20.04 + outputs: + tket: ${{ steps.filter.outputs.tket }} + steps: + - uses: actions/checkout@v2 + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ github.ref }} + filters: | + tket: + - 'tket/**' + check: + runs-on: ubuntu-20.04 + needs: changes + if: needs.changes.outputs.tket == 'true' + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + - name: cache ccache data + uses: actions/cache@v2 + with: + path: ~/.ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + 
${{ runner.os }}-tket-ccache- + - name: apt update + run: sudo apt update + - name: Install conan + id: conan + run: | + pip install conan + conan profile new tket --detect + conan profile update settings.compiler.libcxx=libstdc++11 tket + conan profile update options.tket:shared=True tket + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: install tex components + run: | + sudo apt install texlive texlive-latex-extra latexmk + mkdir -p ~/texmf/tex/latex + wget http://mirrors.ctan.org/graphics/pgf/contrib/quantikz/tikzlibraryquantikz.code.tex -P ~/texmf/tex/latex + - name: install valgrind + run: sudo apt install valgrind + - name: install ninja and ccache + run: sudo apt-get install ninja-build ccache + - name: build tket + run: | + conan install recipes/tket --install-folder=build/tket --profile=tket + conan build recipes/tket --configure --build-folder=build/tket --source-folder=tket/src + conan build recipes/tket --build --build-folder=build/tket + conan export-pkg recipes/tket -f --build-folder=build/tket --source-folder=tket/src + - name: build tket tests + run: | + conan install recipes/tket-tests --install-folder=build/tket-tests --profile=tket + conan build recipes/tket-tests --configure --build-folder=build/tket-tests --source-folder=tket/tests + conan build recipes/tket-tests --build --build-folder=build/tket-tests + - name: run tests under valgrind + run: valgrind --error-exitcode=1 ./build/tket-tests/bin/test_tket diff --git a/.github/disabled-workflows/valgrind_v2_nightly.yml b/.github/disabled-workflows/valgrind_v2_nightly.yml new file mode 100644 index 0000000000..bd6aefc811 --- /dev/null +++ b/.github/disabled-workflows/valgrind_v2_nightly.yml @@ -0,0 +1,82 @@ +name: valgrind check v2 nightly +on: + schedule: + # 03:00 every Monday morning + - cron: '0 3 * * 1' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true +jobs: + changes: + runs-on: ubuntu-20.04 + outputs: + tket: ${{ steps.filter.outputs.tket }} + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + ref: develop2 + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ github.ref }} + filters: | + tket: + - 'tket/**' + check: + runs-on: ubuntu-20.04 + needs: changes + if: needs.changes.outputs.tket == 'true' + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + ref: develop2 + - name: cache ccache data + uses: actions/cache@v2 + with: + path: ~/.ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: apt update + run: sudo apt update + - name: Install conan + id: conan + run: | + pip install conan + conan profile new tket --detect + conan profile update settings.compiler.libcxx=libstdc++11 tket + conan profile update options.tket:shared=True tket + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: install tex components + run: | + sudo apt install texlive texlive-latex-extra latexmk + mkdir -p ~/texmf/tex/latex + wget 
http://mirrors.ctan.org/graphics/pgf/contrib/quantikz/tikzlibraryquantikz.code.tex -P ~/texmf/tex/latex + - name: install valgrind + run: sudo apt install valgrind + - name: install ninja and ccache + run: sudo apt-get install ninja-build ccache + - name: build tket + run: | + conan install recipes/tket --install-folder=build/tket --profile=tket + conan build recipes/tket --configure --build-folder=build/tket --source-folder=tket/src + conan build recipes/tket --build --build-folder=build/tket + conan export-pkg recipes/tket -f --build-folder=build/tket --source-folder=tket/src + - name: build tket tests + run: | + conan install recipes/tket-tests --install-folder=build/tket-tests --profile=tket + conan build recipes/tket-tests --configure --build-folder=build/tket-tests --source-folder=tket/tests + conan build recipes/tket-tests --build --build-folder=build/tket-tests + - name: run tests under valgrind + run: valgrind --error-exitcode=1 ./build/tket-tests/bin/test_tket diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 26241373ac..d4b3408aea 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -12,6 +12,10 @@ on: # 03:00 every Saturday morning - cron: '0 3 * * 6' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: linux: @@ -20,6 +24,7 @@ jobs: env: CC: gcc-10 CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 PYTKET_SKIP_REGISTRATION: "true" steps: - uses: actions/checkout@v2 @@ -54,10 +59,13 @@ jobs: ${conan_cmd} profile update settings.compiler.libcxx=libstdc++11 tket ${conan_cmd} profile update options.tket:shared=True tket echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: ${CONAN_CMD} profile update options.tket-tests:full=True tket + - name: add remote + run: ${CONAN_CMD} remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan - name: Install ninja and ccache run: sudo apt-get install ninja-build ccache - - name: Build symengine - run: ${CONAN_CMD} create --profile=tket recipes/symengine - name: Build tket run: ${CONAN_CMD} create --profile=tket recipes/tket - name: Install runtime test requirements @@ -86,7 +94,6 @@ jobs: run: | cd pytket/tests pip install -r requirements.txt - pip install -r requirements-openfermion.txt pytest --ignore=simulator/ --doctest-modules - name: Set up Python 3.9 if: github.event_name == 'pull_request' @@ -109,7 +116,6 @@ jobs: run: | cd pytket/tests pip install -r requirements.txt - pip install -r requirements-openfermion.txt pytest --ignore=simulator/ --doctest-modules - name: Set up Python 3.10 if: github.event_name == 'schedule' @@ -132,6 +138,7 @@ jobs: name: Build and test (MacOS) runs-on: macos-11 env: + CONAN_REVISIONS_ENABLED: 1 PYTKET_SKIP_REGISTRATION: "true" steps: - uses: actions/checkout@v2 @@ -177,12 +184,15 @@ jobs: conan profile update options.tket:shared=True tket export CC=`which conan` echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force - name: Install boost run: conan install --profile=tket boost/1.78.0@ --build=missing - - name: Build symengine - run: conan create --profile=tket recipes/symengine - name: Build tket - run: conan create --profile=tket recipes/tket + 
run: conan create --profile=tket recipes/tket --build=spdlog --build=tket - name: Build and run tket tests run: conan create --profile=tket recipes/tket-tests - name: Build and run tket proptests @@ -206,7 +216,6 @@ jobs: run: | cd pytket/tests pip install -r requirements.txt - pip install -r requirements-openfermion.txt pytest --ignore=simulator/ --doctest-modules - name: Set up Python 3.9 if: github.event_name == 'pull_request' @@ -223,7 +232,6 @@ jobs: run: | cd pytket/tests pip install -r requirements.txt - pip install -r requirements-openfermion.txt pytest --ignore=simulator/ --doctest-modules - name: Run mypy if: github.event_name == 'pull_request' @@ -256,6 +264,7 @@ jobs: run: shell: "/usr/bin/arch -arch arm64e /bin/bash {0}" env: + CONAN_REVISIONS_ENABLED: 1 PYTKET_SKIP_REGISTRATION: "true" steps: - uses: actions/checkout@v2 @@ -269,12 +278,15 @@ jobs: conan profile update options.tket:shared=True tket export CC=`which conan` echo "CONAN_CMD=${CC}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force - name: Install boost run: conan install --profile=tket boost/1.78.0@ --build=missing - - name: Build symengine - run: conan create --profile=tket recipes/symengine - name: Build tket - run: conan create --profile=tket recipes/tket + run: conan create --profile=tket recipes/tket --build=spdlog --build=tket - name: Build and run tket tests run: conan create --profile=tket recipes/tket-tests - name: Build and run tket proptests @@ -334,6 +346,7 @@ jobs: name: Build and test (Windows) runs-on: windows-2019 env: + CONAN_REVISIONS_ENABLED: 1 PYTKET_SKIP_REGISTRATION: "true" steps: - uses: actions/checkout@v2 @@ -366,14 +379,17 @@ jobs: conan profile update options.tket:shared=True tket $conan_cmd = (gcm conan).Path echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan - name: Cache tket build id: cache-tket uses: actions/cache@v2 with: path: C:\Users\runneradmin\.conan\data\tket - key: ${{ runner.os }}-tket-tket-${{ steps.hash_tket_source.outputs.tket_hash }}-14 - - name: Build symengine - run: conan create --profile=tket recipes/symengine + key: ${{ runner.os }}-tket-tket-${{ steps.hash_tket_source.outputs.tket_hash }}-15 - name: Build tket if: steps.cache-tket.outputs.cache-hit != 'true' run: conan create --profile=tket recipes/tket @@ -395,7 +411,6 @@ jobs: pip install -e . -v cd tests pip install -r requirements.txt - pip install -r requirements-openfermion.txt pytest --ignore=simulator/ --doctest-modules - name: Set up Python 3.9 if: github.event_name == 'pull_request' @@ -409,7 +424,6 @@ jobs: pip install -e . 
-v cd tests pip install -r requirements.txt - pip install -r requirements-openfermion.txt pytest --ignore=simulator/ --doctest-modules - name: Set up Python 3.10 if: github.event_name == 'schedule' diff --git a/.github/workflows/build_macos_m1_wheel b/.github/workflows/build_macos_m1_wheel index 27136ae12d..b0f1348c4c 100755 --- a/.github/workflows/build_macos_m1_wheel +++ b/.github/workflows/build_macos_m1_wheel @@ -16,9 +16,8 @@ set -evu -pip install conan delocate wheel cd $GITHUB_WORKSPACE/pytket export PYVER=`python -c 'import sys; print(".".join(map(str, sys.version_info[:3])))'` -python -m pip install -U pip setuptools_scm -python setup.py bdist_wheel -d "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" +python -m pip install -U pip build delocate +python -m build --outdir "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" delocate-wheel -v -w "$GITHUB_WORKSPACE/wheelhouse/${PYVER}/" "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}/pytket-"*".whl" diff --git a/.github/workflows/build_macos_wheel b/.github/workflows/build_macos_wheel index 95fa5c25ee..0e7e097175 100755 --- a/.github/workflows/build_macos_wheel +++ b/.github/workflows/build_macos_wheel @@ -16,9 +16,10 @@ set -evu -pip install conan delocate wheel cd $GITHUB_WORKSPACE/pytket export PYVER=`python -c 'import sys; print(".".join(map(str, sys.version_info[:3])))'` -python -m pip install -U pip setuptools_scm -python setup.py bdist_wheel -d "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" --plat-name=macosx_10_14_x86_64 +# Ensure wheels are compatible with MacOS 10.14 and later: +export WHEEL_PLAT_NAME=macosx_10_14_x86_64 +python -m pip install -U pip build delocate +python -m build --outdir "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}" delocate-wheel -v -w "$GITHUB_WORKSPACE/wheelhouse/${PYVER}/" "$GITHUB_WORKSPACE/tmp/tmpwheel_${PYVER}/pytket-"*".whl" diff --git a/.github/workflows/build_symengine.yml b/.github/workflows/build_symengine.yml new file mode 100644 index 0000000000..0d9680dafd --- /dev/null +++ b/.github/workflows/build_symengine.yml @@ -0,0 +1,169 @@ +name: build symengine +on: + push: + branches: + - develop + pull_request: + branches: + - develop +jobs: + changes: + runs-on: ubuntu-20.04 + outputs: + recipes_symengine: ${{ steps.filter.outputs.recipes_symengine }} + steps: + - uses: actions/checkout@v2 + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ github.ref }} + filters: | + recipes_symengine: + - 'recipes/symengine/**' + linux: + name: build symengine (linux) + needs: changes + if: needs.changes.outputs.recipes_symengine == 'true' + runs-on: ubuntu-20.04 + strategy: + matrix: + build_type: ['Release', 'Debug'] + shared: ['True', 'False'] + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + - name: install conan + run: pip install conan + - name: create profile + run: | + conan profile new tket --detect + conan profile update settings.compiler.libcxx=libstdc++11 tket + - name: build symengine + run: conan create --profile=tket -s build_type=${{ matrix.build_type }} -o symengine:shared=${{ matrix.shared }} recipes/symengine tket/stable + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: authenticate to repository + run: conan user -p ${{ secrets.JFROG_ARTIFACTORY_TOKEN_1 }} -r tket-conan ${{ secrets.JFROG_ARTIFACTORY_USER_1 }} + - name: get version + run: | + symengine_ver=$(conan inspect --raw version recipes/symengine/) + echo "SYMENGINE_VER=${symengine_ver}" >> $GITHUB_ENV + - name: upload package (dry 
run) + if: github.event_name == 'pull_request' + run: conan upload symengine/${SYMENGINE_VER}@tket/stable --all -r=tket-conan --skip-upload + - name: upload package + if: github.event_name == 'push' + run: conan upload symengine/${SYMENGINE_VER}@tket/stable --all -r=tket-conan + macos: + name: build symengine (macos) + needs: changes + if: needs.changes.outputs.recipes_symengine == 'true' + runs-on: macos-11 + strategy: + matrix: + build_type: ['Release', 'Debug'] + shared: ['True', 'False'] + env: + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.9 + uses: actions/setup-python@v2 + with: + python-version: '3.9' + - name: install conan + run: pip install conan + - name: create profile + run: conan profile new tket --detect + - name: install boost + run: conan install --profile=tket -s build_type=${{ matrix.build_type }} boost/1.78.0@ --build=missing + - name: build symengine + run: conan create --profile=tket -s build_type=${{ matrix.build_type }} -o symengine:shared=${{ matrix.shared }} recipes/symengine tket/stable + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: authenticate to repository + run: conan user -p ${{ secrets.JFROG_ARTIFACTORY_TOKEN_1 }} -r tket-conan ${{ secrets.JFROG_ARTIFACTORY_USER_1 }} + - name: get version + run: | + symengine_ver=$(conan inspect --raw version recipes/symengine/) + echo "SYMENGINE_VER=${symengine_ver}" >> $GITHUB_ENV + - name: upload package (dry run) + if: github.event_name == 'pull_request' + run: conan upload symengine/${SYMENGINE_VER}@tket/stable --all -r=tket-conan --skip-upload + - name: upload package + if: github.event_name == 'push' + run: conan upload symengine/${SYMENGINE_VER}@tket/stable --all -r=tket-conan + macos-m1: + name: build symengine (macos-m1) + needs: changes + if: needs.changes.outputs.recipes_symengine == 'true' + runs-on: [self-hosted, macos, M1] + strategy: + matrix: + build_type: ['Release', 'Debug'] + shared: ['True', 'False'] + env: + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + - name: install conan + run: pip install -U conan + - name: create profile + run: conan profile new tket --detect --force + - name: build symengine + run: conan create --profile=tket -s build_type=${{ matrix.build_type }} -o symengine:shared=${{ matrix.shared }} recipes/symengine tket/stable + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force + - name: authenticate to repository + run: conan user -p ${{ secrets.JFROG_ARTIFACTORY_TOKEN_1 }} -r tket-conan ${{ secrets.JFROG_ARTIFACTORY_USER_1 }} + - name: get version + run: | + symengine_ver=$(conan inspect --raw version recipes/symengine/) + echo "SYMENGINE_VER=${symengine_ver}" >> $GITHUB_ENV + - name: upload package (dry run) + if: github.event_name == 'pull_request' + run: conan upload symengine/${SYMENGINE_VER}@tket/stable --all -r=tket-conan --skip-upload + - name: upload package + if: github.event_name == 'push' + run: conan upload symengine/${SYMENGINE_VER}@tket/stable --all -r=tket-conan + windows: + name: build symengine (windows) + needs: changes + if: needs.changes.outputs.recipes_symengine == 'true' + runs-on: windows-2019 + strategy: + matrix: + build_type: ['Release', 'Debug'] + shared: ['True', 'False'] + env: + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + - name: install conan + run: pip install conan + - name: create profile + run: conan profile new tket 
--detect + - name: normalize line endings in conanfile + run: | + $conanfile ='recipes/symengine/conanfile.py' + $normalized_file = [IO.File]::ReadAllText($conanfile) -replace "`r`n", "`n" + [IO.File]::WriteAllText($conanfile, $normalized_file) + - name: build symengine + run: conan create --profile=tket -s build_type=${{ matrix.build_type }} -o symengine:shared=${{ matrix.shared }} recipes/symengine tket/stable + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: authenticate to repository + run: conan user -p ${{ secrets.JFROG_ARTIFACTORY_TOKEN_1 }} -r tket-conan ${{ secrets.JFROG_ARTIFACTORY_USER_1 }} + - name: get version + run: | + $symengine_ver = conan inspect --raw version recipes/symengine/ + echo "SYMENGINE_VER=${symengine_ver}" >> $env:GITHUB_ENV + - name: upload package (dry run) + if: github.event_name == 'pull_request' + run: conan upload symengine/${{ env.SYMENGINE_VER }}@tket/stable --all -r=tket-conan --skip-upload + - name: upload package + if: github.event_name == 'push' + run: conan upload symengine/${{ env.SYMENGINE_VER }}@tket/stable --all -r=tket-conan diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index dacc7bce93..0cc2e1e4a2 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -8,6 +8,10 @@ on: branches: - develop +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: changes: @@ -32,6 +36,7 @@ jobs: env: CC: gcc-10 CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 steps: - uses: actions/checkout@v2 with: @@ -59,13 +64,12 @@ jobs: ${conan_cmd} profile update options.tket:shared=True tket ${conan_cmd} profile update settings.build_type=Debug tket echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: add remote + run: ${CONAN_CMD} remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan - name: Install ninja and ccache run: | sudo apt-get update sudo apt-get install ninja-build ccache - - name: Build symengine - run: - ${CONAN_CMD} create --profile=tket recipes/symengine - name: Build tket run: | ${CONAN_CMD} install recipes/tket --install-folder=build/tket --profile=tket -o tket:profile_coverage=True diff --git a/.github/workflows/linuxbuildwheel b/.github/workflows/linuxbuildwheel index 7618b8a6f8..5b65fe5f97 100755 --- a/.github/workflows/linuxbuildwheel +++ b/.github/workflows/linuxbuildwheel @@ -27,8 +27,10 @@ cd /tket ${CONAN_CMD} profile new tket --detect ${CONAN_CMD} profile update options.tket:shared=True tket -${CONAN_CMD} create --profile=tket recipes/symengine -${CONAN_CMD} create --profile=tket --test-folder=None recipes/tket +${CONAN_CMD} create --profile=tket recipes/symengine tket/stable +# Use header-only version of spdlog: +# https://github.com/conan-io/conan-docker-tools/issues/303#issuecomment-922492130 +${CONAN_CMD} create --profile=tket --test-folder=None -o tket:spdlog_ho=True recipes/tket ${CONAN_CMD} create --profile=tket --test-folder=None recipes/pybind11 cd /tket/pytket @@ -40,7 +42,7 @@ do cd /tket/pytket export PYEX=/opt/python/${pyX}/bin/python export PYVER=`${PYEX} -c 'import sys; print(".".join(map(str, sys.version_info[:3])))'` - ${PYEX} -m pip install -U pip setuptools_scm - ${PYEX} setup.py bdist_wheel -d "tmpwheel_${PYVER}" + ${PYEX} -m pip install -U pip build + ${PYEX} -m build --outdir "tmpwheel_${PYVER}" auditwheel repair "tmpwheel_${PYVER}/pytket-"*".whl" -w "audited/${PYVER}/" done diff --git a/.github/workflows/pytket_docs.yml 
b/.github/workflows/pytket_docs.yml deleted file mode 100644 index 958a087a95..0000000000 --- a/.github/workflows/pytket_docs.yml +++ /dev/null @@ -1,83 +0,0 @@ -name: Pytket Docs - -on: - push: - branches: - - 'docs/**' - -jobs: - docs: - name: build docs - runs-on: ubuntu-20.04 - env: - PYTKET_SKIP_REGISTRATION: "true" - strategy: - matrix: - python-version: ['3.9'] - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: '0' - - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* - - name: Get current time - uses: srfrnk/current-time@v1.1.0 - id: current_time - with: - format: YYYYMMDDHHmmss - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Cache ccache data - uses: actions/cache@v2 - with: - path: ~/.ccache - key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} - restore-keys: | - ${{ runner.os }}-tket-ccache- - - name: Get pip cache - id: pip-cache - run: | - python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" - - name: Cache pip - uses: actions/cache@v2 - with: - path: ${{ steps.pip-cache.outputs.dir }} - # Look to see if there is a cache hit for the corresponding requirements file - key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('pytket/**/requirements*.txt') }} - restore-keys: | - ${{ runner.os }}-pip-${{ matrix.python-version }}- - ${{ runner.os }}-pip- - - name: Install prerequisites - run: | - sudo apt-get update - sudo apt-get install ninja-build ccache graphviz - - name: Install conan - run: | - pip install conan - conan profile new tket --detect - conan profile update options.tket:shared=True tket - conan profile update settings.compiler.libcxx=libstdc++11 tket - - name: Build symengine - run: conan create --profile=tket recipes/symengine - - name: Build tket - run: conan create --profile=tket recipes/tket - - name: Install pybind11 - run: conan create --profile=tket recipes/pybind11 - - name: Build pytket - run: | - cd pytket - pip install -e . 
-v - - name: Install docs dependencies - run: | - pip install -r pytket/docs/requirements.txt - - name: Test building docs - timeout-minutes: 20 - run: | - ./.github/workflows/build-docs - - name: Upload docs - if: github.event_name == 'push' - uses: actions/upload-artifact@v2 - with: - name: pytket-docs - path: pytket/docs/build/html/ diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4b7be91347..10e5223f5d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,6 +39,8 @@ jobs: build_macos_wheels: name: Build macos wheels runs-on: macos-11 + env: + CONAN_REVISIONS_ENABLED: 1 steps: - uses: actions/checkout@v2 with: @@ -65,19 +67,13 @@ jobs: pip install conan conan profile new tket --detect --force conan profile update options.tket:shared=True tket + conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force conan install --profile=tket boost/1.78.0@ --build=missing - conan create --profile=tket recipes/symengine conan create --profile=tket recipes/tket - name: Install pybind11 run: conan create --profile=tket recipes/pybind11 - name: Build wheel (3.8) run: .github/workflows/build_macos_wheel - - name: Set up Python 3.8 - uses: actions/setup-python@v2 - with: - python-version: '3.9' - - name: Build wheel (3.8) - run: .github/workflows/build_macos_wheel - name: Set up Python 3.9 uses: actions/setup-python@v2 with: @@ -98,6 +94,8 @@ jobs: build_macos_M1_wheels: name: Build macos (M1) wheels runs-on: [self-hosted, macos, M1] + env: + CONAN_REVISIONS_ENABLED: 1 defaults: run: shell: "/usr/bin/arch -arch arm64e /bin/bash {0}" @@ -112,8 +110,8 @@ jobs: pyenv shell tket-3.8 conan profile new tket --detect --force conan profile update options.tket:shared=True tket + conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan --force conan install --profile=tket boost/1.78.0@ --build=missing - conan create --profile=tket recipes/symengine conan create --profile=tket recipes/tket conan create --profile=tket recipes/pybind11 .github/workflows/build_macos_m1_wheel @@ -129,6 +127,8 @@ jobs: build_Windows_wheels: name: Build Windows wheels runs-on: windows-2019 + env: + CONAN_REVISIONS_ENABLED: 1 steps: - uses: actions/checkout@v2 with: @@ -160,14 +160,14 @@ jobs: conan profile update options.tket:shared=True tket $conan_cmd = (gcm conan).Path echo "CONAN_CMD=${conan_cmd}" >> $GITHUB_ENV + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan - name: Cache tket build id: cache-tket uses: actions/cache@v2 with: path: C:\Users\runneradmin\.conan\data\tket - key: ${{ runner.os }}-tket-tket-${{ steps.hash_tket_source.outputs.tket_hash }}-8 - - name: Build symengine - run: conan create --profile=tket recipes/symengine + key: ${{ runner.os }}-tket-tket-${{ steps.hash_tket_source.outputs.tket_hash }}-9 - name: Build tket if: steps.cache-tket.outputs.cache-hit != 'true' run: conan create --profile=tket recipes/tket @@ -179,10 +179,9 @@ jobs: python-version: '3.8' - name: Build wheel (3.8) run: | - pip install wheel cd pytket - python -m pip install -U pip setuptools_scm - python setup.py bdist_wheel -d "${{ github.workspace }}/wheelhouse/3.8" + python -m pip install -U pip build + python -m build --outdir "${{ github.workspace }}/wheelhouse/3.8" - uses: actions/upload-artifact@v2 with: name: Windows_wheels @@ -193,10 +192,9 @@ jobs: python-version: '3.9' - name: Build wheel (3.9) run: | - pip install wheel cd pytket - python -m pip install -U 
pip setuptools_scm - python setup.py bdist_wheel -d "${{ github.workspace }}/wheelhouse/3.9" + python -m pip install -U pip build + python -m build --outdir "${{ github.workspace }}/wheelhouse/3.9" - uses: actions/upload-artifact@v2 with: name: Windows_wheels @@ -207,10 +205,9 @@ jobs: python-version: '3.10' - name: Build wheel (3.10) run: | - pip install wheel cd pytket - python -m pip install -U pip setuptools_scm - python setup.py bdist_wheel -d "${{ github.workspace }}/wheelhouse/3.10" + python -m pip install -U pip build + python -m build --outdir "${{ github.workspace }}/wheelhouse/3.10" - uses: actions/upload-artifact@v2 with: name: Windows_wheels diff --git a/.github/workflows/valgrind.yml b/.github/workflows/valgrind.yml new file mode 100644 index 0000000000..6197c60688 --- /dev/null +++ b/.github/workflows/valgrind.yml @@ -0,0 +1,79 @@ +name: valgrind check +on: + pull_request: + branches: + - develop + + schedule: + # 03:00 every Monday morning + - cron: '0 3 * * 1' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true +jobs: + changes: + runs-on: ubuntu-20.04 + outputs: + tket: ${{ steps.filter.outputs.tket }} + steps: + - uses: actions/checkout@v2 + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ github.ref }} + filters: | + tket: + - 'tket/**' + check: + runs-on: ubuntu-20.04 + needs: changes + if: needs.changes.outputs.tket == 'true' + env: + CC: gcc-10 + CXX: g++-10 + CONAN_REVISIONS_ENABLED: 1 + steps: + - uses: actions/checkout@v2 + - name: cache ccache data + uses: actions/cache@v2 + with: + path: ~/.ccache + key: ${{ runner.os }}-tket-ccache-${{ steps.current_time.outputs.formattedTime }} + restore-keys: | + ${{ runner.os }}-tket-ccache- + - name: apt update + run: sudo apt update + - name: Install conan + id: conan + run: | + pip install conan + conan profile new tket --detect + conan profile update settings.compiler.libcxx=libstdc++11 tket + conan profile update options.tket:shared=True tket + - name: set option to run full test suite + if: github.event_name == 'schedule' + run: conan profile update options.tket-tests:full=True tket + - name: add remote + run: conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan + - name: install tex components + run: | + sudo apt install texlive texlive-latex-extra latexmk + mkdir -p ~/texmf/tex/latex + wget http://mirrors.ctan.org/graphics/pgf/contrib/quantikz/tikzlibraryquantikz.code.tex -P ~/texmf/tex/latex + - name: install valgrind + run: sudo apt install valgrind + - name: install ninja and ccache + run: sudo apt-get install ninja-build ccache + - name: build tket + run: | + conan install recipes/tket --install-folder=build/tket --profile=tket + conan build recipes/tket --configure --build-folder=build/tket --source-folder=tket/src + conan build recipes/tket --build --build-folder=build/tket + conan export-pkg recipes/tket -f --build-folder=build/tket --source-folder=tket/src + - name: build tket tests + run: | + conan install recipes/tket-tests --install-folder=build/tket-tests --profile=tket + conan build recipes/tket-tests --configure --build-folder=build/tket-tests --source-folder=tket/tests + conan build recipes/tket-tests --build --build-folder=build/tket-tests + - name: run tests under valgrind + run: valgrind --error-exitcode=1 ./build/tket-tests/bin/test_tket diff --git a/README.md b/README.md index 87e35d551d..221f6fb767 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,18 @@ recommended in the warning message: conan profile update 
settings.compiler.libcxx=libstdc++11 tket ``` +Add the `tket.conan` repository to your remotes: + +```shell +conan remote add tket-conan https://tket.jfrog.io/artifactory/api/conan/tket-conan +``` + +Enable revisions: + +```shell +conan config set general.revisions_enabled=1 +``` + We want to build shared rather than static libraries, so set this in the profile: @@ -135,7 +147,9 @@ The `symengine` dependency is built from a local conan recipe. Run: conan create --profile=tket recipes/symengine ``` -to build it. +to build it. If you are using a conan configuration supported by the CI +(see above under "Build tools"), this is unnecessary as a pre-built package +will be downloaded from the `tket-conan` repository when you build `tket`. ### Building tket @@ -149,13 +163,6 @@ conan create --profile=tket recipes/tket to build the tket library. -Note: by default, `tket` uses the header-only version of `spdlog`. This avoids -an -[issue](https://github.com/conan-io/conan-docker-tools/issues/303#issuecomment-922492130) -with an undefined symbol when run in some Linux virtual environments, but makes -builds slower. For faster local builds you can supply the option -`-o tket:spdlog_ho=False` to the above `conan create` command. - To build and run the tket tests: ```shell @@ -165,6 +172,10 @@ conan create --profile=tket recipes/tket-tests If you want to build them without running them, pass `--test-folder None` to the `conan` command. (You can still run them manually afterwards.) +Some tests (those that add significantly to the runtime) are not built by +default. To build all tests, add `-o tket-tests:full=True` to the above +`conan create` command. + There is also a small set of property-based tests which you can build and run with: diff --git a/pytket/CMakeLists.txt b/pytket/CMakeLists.txt index ba9845c40b..0116777cc2 100644 --- a/pytket/CMakeLists.txt +++ b/pytket/CMakeLists.txt @@ -38,11 +38,15 @@ else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror") endif() +list(APPEND TKET_EXTRA_LIBS + ${CONAN_LIBS_FMT} ${CONAN_LIBS_SPDLOG} ${CONAN_LIBS_SYMENGINE}) + pybind11_add_module(circuit binders/circuit/main.cpp binders/circuit/unitid.cpp binders/circuit/boxes.cpp binders/circuit/classical.cpp + binders/circuit/library.cpp binders/circuit/Circuit/main.cpp binders/circuit/Circuit/add_op.cpp binders/circuit/Circuit/add_classical_op.cpp) @@ -53,32 +57,33 @@ target_link_libraries(circuit PRIVATE tket-Gate tket-Ops tket-OpType - tket-Routing + tket-Mapping + tket-TokenSwapping tket-Simulation tket-Utils) -target_link_libraries(circuit PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(circuit PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(circuit PRIVATE bcrypt) endif() -pybind11_add_module(routing binders/routing.cpp) -target_include_directories(routing PRIVATE binders/include) -target_link_libraries(routing PRIVATE +pybind11_add_module(mapping binders/mapping.cpp) +target_include_directories(mapping PRIVATE binders/include) +target_link_libraries(mapping PRIVATE + tket-ArchAwareSynth tket-Architecture tket-Characterisation tket-Circuit tket-Gate tket-Graphs + tket-Mapping tket-Ops tket-OpType - tket-Routing + tket-PauliGraph + tket-Placement + tket-Transformations + tket-TokenSwapping tket-Utils) -target_link_libraries(routing PRIVATE ${CONAN_LIBS_SYMENGINE}) -if (WIN32) - # For boost::uuid: - target_link_libraries(routing PRIVATE bcrypt) -endif() pybind11_add_module(transform binders/transform.cpp) target_include_directories(transform PRIVATE 
binders/include) @@ -95,7 +100,7 @@ target_link_libraries(transform PRIVATE tket-PauliGraph tket-Transformations tket-Utils) -target_link_libraries(transform PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(transform PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(transform PRIVATE bcrypt) @@ -112,14 +117,15 @@ target_link_libraries(predicates PRIVATE tket-Converters tket-Gate tket-Graphs + tket-Mapping tket-Ops tket-OpType tket-PauliGraph tket-Predicates - tket-Routing + tket-TokenSwapping tket-Transformations tket-Utils) -target_link_libraries(predicates PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(predicates PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(predicates PRIVATE bcrypt) @@ -136,33 +142,39 @@ target_link_libraries(passes PRIVATE tket-Converters tket-Gate tket-Graphs + tket-Mapping tket-Ops tket-OpType tket-PauliGraph tket-Predicates - tket-Routing + tket-TokenSwapping tket-Transformations tket-Utils) -target_link_libraries(passes PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(passes PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(passes PRIVATE bcrypt) endif() -pybind11_add_module(program binders/program.cpp) -target_include_directories(program PRIVATE binders/include) -target_link_libraries(program PRIVATE + +pybind11_add_module(architecture binders/architecture.cpp) +target_include_directories(architecture PRIVATE binders/include) +target_link_libraries(architecture PRIVATE + tket-Architecture + tket-Graphs + tket-Utils) +target_link_libraries(architecture PRIVATE ${TKET_EXTRA_LIBS}) + + +pybind11_add_module(placement binders/placement.cpp) +target_include_directories(placement PRIVATE binders/include) +target_link_libraries(placement PRIVATE + tket-Placement + tket-Architecture tket-Circuit - tket-Gate - tket-Ops - tket-OpType - tket-Program tket-Utils) -target_link_libraries(program PRIVATE ${CONAN_LIBS_SYMENGINE}) -if (WIN32) - # For boost::uuid: - target_link_libraries(program PRIVATE bcrypt) -endif() +target_link_libraries(placement PRIVATE ${TKET_EXTRA_LIBS}) + pybind11_add_module(partition binders/partition.cpp) target_include_directories(partition PRIVATE binders/include) @@ -177,7 +189,7 @@ target_link_libraries(partition PRIVATE tket-OpType tket-PauliGraph tket-Utils) -target_link_libraries(partition PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(partition PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(partition PRIVATE bcrypt) @@ -187,19 +199,19 @@ pybind11_add_module(pauli binders/pauli.cpp) target_include_directories(pauli PRIVATE binders/include) target_link_libraries(pauli PRIVATE tket-Utils) -target_link_libraries(pauli PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(pauli PRIVATE ${TKET_EXTRA_LIBS}) pybind11_add_module(logging binders/logging.cpp) target_include_directories(logging PRIVATE binders/include) target_link_libraries(logging PRIVATE tket-Utils) -target_link_libraries(logging PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(logging PRIVATE ${TKET_EXTRA_LIBS}) pybind11_add_module(utils_serialization binders/utils_serialization.cpp) target_include_directories(utils_serialization PRIVATE binders/include) target_link_libraries(utils_serialization PRIVATE tket-Utils) -target_link_libraries(utils_serialization PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(utils_serialization PRIVATE ${TKET_EXTRA_LIBS}) pybind11_add_module(tailoring binders/tailoring.cpp) 
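
The CMake changes above replace the old `routing` and `program` binder modules with separate `architecture`, `placement` and `mapping` extension modules. As a rough sketch of what this split looks like from the Python side (import paths assume the usual pytket package layout; the connectivity values are arbitrary examples):

```python
# Sketch only: assumes the architecture binder module is re-exported as
# pytket.architecture; connectivity values are arbitrary examples.
from pytket.architecture import Architecture, SquareGrid

arc = Architecture([(0, 1), (1, 2)])    # path connectivity on three nodes
grid = SquareGrid(3, 3)                 # 3 x 3 lattice, nodes numbered row-major

print(arc.nodes)                        # Node objects for indices 0, 1, 2
print(grid.qind_to_squind(4))           # centre qubit -> (row, column) == (1, 1)
```
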
target_include_directories(tailoring PRIVATE binders/include) @@ -216,7 +228,7 @@ target_link_libraries(tailoring PRIVATE tket-OpType tket-PauliGraph tket-Utils) -target_link_libraries(tailoring PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(tailoring PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(tailoring PRIVATE bcrypt) @@ -234,7 +246,7 @@ target_link_libraries(tableau PRIVATE tket-OpType tket-PauliGraph tket-Utils) -target_link_libraries(tableau PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(tableau PRIVATE ${TKET_EXTRA_LIBS}) if (WIN32) # For boost::uuid: target_link_libraries(tableau PRIVATE bcrypt) @@ -247,4 +259,4 @@ target_include_directories(zx PRIVATE binders/include) target_link_libraries(zx PRIVATE tket-Utils tket-ZX) -target_link_libraries(zx PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(zx PRIVATE ${TKET_EXTRA_LIBS}) diff --git a/pytket/binders/architecture.cpp b/pytket/binders/architecture.cpp new file mode 100644 index 0000000000..e587c3b455 --- /dev/null +++ b/pytket/binders/architecture.cpp @@ -0,0 +1,185 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Architecture/Architecture.hpp" + +#include +#include +#include +#include + +#include "Circuit/Circuit.hpp" +#include "Utils/Json.hpp" +#include "binder_json.hpp" +#include "binder_utils.hpp" +#include "typecast.hpp" + +namespace py = pybind11; +using json = nlohmann::json; + +namespace tket { + +PYBIND11_MODULE(architecture, m) { + py::class_>( + m, "Architecture", + "Class describing the connectivity of qubits on a general device.") + .def( + py::init([](const std::vector> + &connections) { return Architecture(connections); }), + "The constructor for an architecture with connectivity " + "between qubits.\n\n:param connections: A list of pairs " + "representing qubit indices that can perform two-qubit " + "operations", + py::arg("connections")) + .def( + py::init> &>(), + "The constructor for an architecture with connectivity " + "between qubits.\n\n:param connections: A list of pairs " + "representing Nodes that can perform two-qubit operations", + py::arg("connections")) + .def( + "__repr__", + [](const Architecture &arc) { + return ""; + }) + .def( + "get_distance", &Architecture::get_distance, + "given two nodes in Architecture, " + "returns distance between them", + py::arg("node_0"), py::arg("node_1")) + .def( + "valid_operation", &Architecture::valid_operation, + "Returns true if the given operation acting on the given ", + "nodes can be executed on the Architecture connectivity graph." 
+ "\n\n:param uids: list of UnitIDs validity is being checked for", + py::arg("uids")) + .def( + "get_adjacent_nodes", &Architecture::get_neighbour_nodes, + "given a node, returns adjacent nodes in Architecture.", + py::arg("node")) + .def_property_readonly( + "nodes", &Architecture::get_all_nodes_vec, + "Returns all nodes of architecture as Node objects.") + .def_property_readonly( + "coupling", &Architecture::get_all_edges_vec, + "Returns the coupling map of the Architecture as " + "UnitIDs. ") + .def( + "to_dict", [](const Architecture &arch) { return json(arch); }, + "Return a JSON serializable dict representation of " + "the Architecture.\n" + ":return: dict containing nodes and links.") + .def_static( + "from_dict", [](const json &j) { return j.get(); }, + "Construct Architecture instance from JSON serializable " + "dict representation of the Architecture.") + // as far as Python is concerned, Architectures are immutable + .def( + "__deepcopy__", + [](const Architecture &arc, py::dict = py::dict()) { return arc; }) + .def( + "__repr__", + [](const Architecture &arc) { + return ""; + }) + .def(py::self == py::self); + py::class_, Architecture>( + m, "SquareGrid", + "Architecture class for qubits arranged in a square lattice of " + "given number of rows and columns. Qubits are arranged with qubits " + "values increasing first along rows then along columns i.e. for a " + "3 x 3 grid:\n\n 0 1 2\n\n 3 4 5\n\n 6 7 8") + .def( + py::init(), + "The constructor for a Square Grid architecture with some " + "undirected connectivity between qubits.\n\n:param n_rows: " + "The number of rows in the grid\n:param n_columns: The number " + "of columns in the grid", + py::arg("n_rows"), py::arg("n_columns")) + .def( + py::init(), + "The constructor for a Square Grid architecture with some " + "undirected connectivity between qubits.\n\n:param n_rows: " + "The number of rows in the grid\n:param n_columns: The number " + "of columns in the grid\n:param n_layers: The number of " + "layers of grids", + py::arg("n_rows"), py::arg("n_columns"), py::arg("n_layers")) + .def( + "squind_to_qind", + [](const SquareGrid &self, const unsigned row, const unsigned col) { + return self.squind_to_qind(row, col); + }, + "Converts a (row,column) index for a square grid to a " + "single " + "qubit index\n\n:param row: The given row index\n:param " + "column: The given column index\n:return: the " + "corresponding " + "global qubit index", + py::arg("row"), py::arg("column")) + .def( + "qind_to_squind", &SquareGrid::qind_to_squind, + "Converts a single qubit index to a (row,column) index for a " + "square grid.\n\n:param index: The global qubit " + "index\n:return: the corresponding grid index as a pair " + "(row,column)", + py::arg("index")) + // as far as Python is concerned, Architectures are immutable + .def( + "__deepcopy__", + [](const SquareGrid &arc, py::dict = py::dict()) { return arc; }) + .def("__repr__", [](const SquareGrid &arc) { + return ""; + }); + py::class_, Architecture>( + m, "RingArch", + "Architecture class for number of qubits arranged in a ring.") + .def( + py::init(), + "The constructor for a RingArchitecture with some undirected " + "connectivity between qubits.\n\n:param number of qubits", + py::arg("nodes")) + .def("__repr__", [](const RingArch &arc) { + return ""; + }); + py::class_( + m, "FullyConnected", + "An architecture with full connectivity between qubits.") + .def( + py::init(), + "Construct a fully-connected architecture." 
+ "\n\n:param n: number of qubits", + py::arg("n")) + .def( + "__repr__", + [](const FullyConnected &arc) { + return ""; + }) + .def(py::self == py::self) + .def_property_readonly( + "nodes", &FullyConnected::get_all_nodes_vec, + "All nodes of the architecture as :py:class:`Node` objects.") + .def( + "to_dict", [](const FullyConnected &arch) { return json(arch); }, + "JSON-serializable dict representation of the architecture." + "\n\n:return: dict containing nodes") + .def_static( + "from_dict", [](const json &j) { return j.get(); }, + "Construct FullyConnected instance from dict representation."); +} +} // namespace tket diff --git a/pytket/binders/circuit/Circuit/add_classical_op.cpp b/pytket/binders/circuit/Circuit/add_classical_op.cpp index 1e22ec774c..d74dd85f3b 100644 --- a/pytket/binders/circuit/Circuit/add_classical_op.cpp +++ b/pytket/binders/circuit/Circuit/add_classical_op.cpp @@ -54,11 +54,13 @@ void init_circuit_add_classical_op( c.def( "add_c_transform", [](Circuit &circ, const std::vector &values, - const std::vector &args, const std::string &name) { + const std::vector &args, + const std::string &name) -> Circuit & { unsigned n_args = args.size(); std::shared_ptr op = std::make_shared(n_args, values, name); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "Appends a purely classical transformation, defined by a table of " "values, to " @@ -76,20 +78,23 @@ void init_circuit_add_classical_op( .def( "add_c_transform", [](Circuit &circ, const std::vector &values, - const std::vector &args, const std::string &name) { + const std::vector &args, + const std::string &name) -> Circuit & { unsigned n_args = args.size(); std::shared_ptr op = std::make_shared(n_args, values, name); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "See :py:meth:`add_c_transform`.", py::arg("values"), py::arg("args"), py::arg("name") = "ClassicalTransform") .def( "add_c_setbits", [](Circuit &circ, const std::vector &values, - const std::vector args) { + const std::vector args) -> Circuit & { std::shared_ptr op = std::make_shared(values); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "Appends an operation to set some bit values." 
"\n\n:param values: values to set" @@ -99,21 +104,23 @@ void init_circuit_add_classical_op( .def( "add_c_setbits", [](Circuit &circ, const std::vector &values, - const std::vector args) { + const std::vector args) -> Circuit & { std::shared_ptr op = std::make_shared(values); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "See :py:meth:`add_c_setbits`.", py::arg("values"), py::arg("args")) .def( "add_c_copybits", [](Circuit &circ, const std::vector &args_in, - const std::vector &args_out) { + const std::vector &args_out) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in); std::vector args = args_in; args.insert(args.end(), args_out.begin(), args_out.end()); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "Appends a classical copy operation" "\n\n:param args_in: source bits" @@ -123,13 +130,14 @@ void init_circuit_add_classical_op( .def( "add_c_copybits", [](Circuit &circ, const std::vector &args_in, - const std::vector &args_out) { + const std::vector &args_out) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in); std::vector args = args_in; args.insert(args.end(), args_out.begin(), args_out.end()); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "See :py:meth:`add_c_copybits`.", py::arg("args_in"), py::arg("args_out")) @@ -137,13 +145,14 @@ void init_circuit_add_classical_op( "add_c_predicate", [](Circuit &circ, const std::vector &values, const std::vector &args_in, unsigned arg_out, - const std::string &name) { + const std::string &name) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in, values, name); std::vector args = args_in; args.push_back(arg_out); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "Appends a classical predicate, defined by a truth table, to the end " "of the " @@ -161,13 +170,14 @@ void init_circuit_add_classical_op( "add_c_predicate", [](Circuit &circ, const std::vector &values, const std::vector &args_in, Bit arg_out, - const std::string &name) { + const std::string &name) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in, values, name); std::vector args = args_in; args.push_back(arg_out); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "See :py:meth:`add_c_predicate`.", py::arg("values"), py::arg("args_in"), py::arg("arg_out"), @@ -176,13 +186,14 @@ void init_circuit_add_classical_op( "add_c_modifier", [](Circuit &circ, const std::vector &values, const std::vector &args_in, unsigned arg_inout, - const std::string &name) { + const std::string &name) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in, values, name); std::vector args = args_in; args.push_back(arg_inout); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "Appends a classical modifying operation, defined by a truth table, " "to the " @@ -202,13 +213,14 @@ void init_circuit_add_classical_op( "add_c_modifier", [](Circuit &circ, const std::vector &values, const std::vector &args_in, Bit arg_inout, - const std::string &name) { + const std::string &name) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in, values, name); std::vector args = args_in; args.push_back(arg_inout); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "See 
:py:meth:`add_c_modifier`.", py::arg("values"), py::arg("args_in"), py::arg("arg_inout"), @@ -216,15 +228,15 @@ void init_circuit_add_classical_op( .def( "add_c_and", [](Circuit &circ, unsigned arg0_in, unsigned arg1_in, - unsigned arg_out) { + unsigned arg_out) -> Circuit & { if (arg0_in == arg_out) { - return circ.add_op(AndWithOp(), {arg1_in, arg_out}); + circ.add_op(AndWithOp(), {arg1_in, arg_out}); } else if (arg1_in == arg_out) { - return circ.add_op(AndWithOp(), {arg0_in, arg_out}); + circ.add_op(AndWithOp(), {arg0_in, arg_out}); } else { - return circ.add_op( - AndOp(), {arg0_in, arg1_in, arg_out}); + circ.add_op(AndOp(), {arg0_in, arg1_in, arg_out}); } + return circ; }, "Appends a binary AND operation to the end of the circuit." "\n\n:param arg0_in: first input bit" @@ -234,28 +246,31 @@ void init_circuit_add_classical_op( py::arg("arg0_in"), py::arg("arg1_in"), py::arg("arg_out")) .def( "add_c_and", - [](Circuit &circ, Bit arg0_in, Bit arg1_in, Bit arg_out) { + [](Circuit &circ, Bit arg0_in, Bit arg1_in, + Bit arg_out) -> Circuit & { if (arg0_in == arg_out) { - return circ.add_op(AndWithOp(), {arg1_in, arg_out}); + circ.add_op(AndWithOp(), {arg1_in, arg_out}); } else if (arg1_in == arg_out) { - return circ.add_op(AndWithOp(), {arg0_in, arg_out}); + circ.add_op(AndWithOp(), {arg0_in, arg_out}); } else { - return circ.add_op(AndOp(), {arg0_in, arg1_in, arg_out}); + circ.add_op(AndOp(), {arg0_in, arg1_in, arg_out}); } + return circ; }, "See :py:meth:`add_c_and`.", py::arg("arg0_in"), py::arg("arg1_in"), py::arg("arg_out")) .def( "add_c_or", [](Circuit &circ, unsigned arg0_in, unsigned arg1_in, - unsigned arg_out) { + unsigned arg_out) -> Circuit & { if (arg0_in == arg_out) { - return circ.add_op(OrWithOp(), {arg1_in, arg_out}); + circ.add_op(OrWithOp(), {arg1_in, arg_out}); } else if (arg1_in == arg_out) { - return circ.add_op(OrWithOp(), {arg0_in, arg_out}); + circ.add_op(OrWithOp(), {arg0_in, arg_out}); } else { - return circ.add_op(OrOp(), {arg0_in, arg1_in, arg_out}); + circ.add_op(OrOp(), {arg0_in, arg1_in, arg_out}); } + return circ; }, "Appends a binary OR operation to the end of the circuit." "\n\n:param arg0_in: first input bit" @@ -265,29 +280,31 @@ void init_circuit_add_classical_op( py::arg("arg0_in"), py::arg("arg1_in"), py::arg("arg_out")) .def( "add_c_or", - [](Circuit &circ, Bit arg0_in, Bit arg1_in, Bit arg_out) { + [](Circuit &circ, Bit arg0_in, Bit arg1_in, + Bit arg_out) -> Circuit & { if (arg0_in == arg_out) { - return circ.add_op(OrWithOp(), {arg1_in, arg_out}); + circ.add_op(OrWithOp(), {arg1_in, arg_out}); } else if (arg1_in == arg_out) { - return circ.add_op(OrWithOp(), {arg0_in, arg_out}); + circ.add_op(OrWithOp(), {arg0_in, arg_out}); } else { - return circ.add_op(OrOp(), {arg0_in, arg1_in, arg_out}); + circ.add_op(OrOp(), {arg0_in, arg1_in, arg_out}); } + return circ; }, "See :py:meth:`add_c_or`.", py::arg("arg0_in"), py::arg("arg1_in"), py::arg("arg_out")) .def( "add_c_xor", [](Circuit &circ, unsigned arg0_in, unsigned arg1_in, - unsigned arg_out) { + unsigned arg_out) -> Circuit & { if (arg0_in == arg_out) { - return circ.add_op(XorWithOp(), {arg1_in, arg_out}); + circ.add_op(XorWithOp(), {arg1_in, arg_out}); } else if (arg1_in == arg_out) { - return circ.add_op(XorWithOp(), {arg0_in, arg_out}); + circ.add_op(XorWithOp(), {arg0_in, arg_out}); } else { - return circ.add_op( - XorOp(), {arg0_in, arg1_in, arg_out}); + circ.add_op(XorOp(), {arg0_in, arg1_in, arg_out}); } + return circ; }, "Appends a binary XOR operation to the end of the circuit." 
"\n\n:param arg0_in: first input bit" @@ -297,21 +314,24 @@ void init_circuit_add_classical_op( py::arg("arg0_in"), py::arg("arg1_in"), py::arg("arg_out")) .def( "add_c_xor", - [](Circuit &circ, Bit arg0_in, Bit arg1_in, Bit arg_out) { + [](Circuit &circ, Bit arg0_in, Bit arg1_in, + Bit arg_out) -> Circuit & { if (arg0_in == arg_out) { - return circ.add_op(XorWithOp(), {arg1_in, arg_out}); + circ.add_op(XorWithOp(), {arg1_in, arg_out}); } else if (arg1_in == arg_out) { - return circ.add_op(XorWithOp(), {arg0_in, arg_out}); + circ.add_op(XorWithOp(), {arg0_in, arg_out}); } else { - return circ.add_op(XorOp(), {arg0_in, arg1_in, arg_out}); + circ.add_op(XorOp(), {arg0_in, arg1_in, arg_out}); } + return circ; }, "See :py:meth:`add_c_xor`.", py::arg("arg0_in"), py::arg("arg1_in"), py::arg("arg_out")) .def( "add_c_not", - [](Circuit &circ, unsigned arg_in, unsigned arg_out) { - return circ.add_op(NotOp(), {arg_in, arg_out}); + [](Circuit &circ, unsigned arg_in, unsigned arg_out) -> Circuit & { + circ.add_op(NotOp(), {arg_in, arg_out}); + return circ; }, "Appends a NOT operation to the end of the circuit." "\n\n:param arg_in: input bit" @@ -320,20 +340,23 @@ void init_circuit_add_classical_op( py::arg("arg_in"), py::arg("arg_out")) .def( "add_c_not", - [](Circuit &circ, Bit arg_in, Bit arg_out) { - return circ.add_op(NotOp(), {arg_in, arg_out}); + [](Circuit &circ, Bit arg_in, Bit arg_out) -> Circuit & { + circ.add_op(NotOp(), {arg_in, arg_out}); + return circ; }, "See :py:meth:`add_c_not`.", py::arg("arg_in"), py::arg("arg_out")) .def( "add_c_range_predicate", [](Circuit &circ, uint32_t a, uint32_t b, - const std::vector &args_in, unsigned arg_out) { + const std::vector &args_in, + unsigned arg_out) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in, a, b); std::vector args = args_in; args.push_back(arg_out); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "Appends a range-predicate operation to the end of the circuit." "\n\n:param minval: lower bound of input in little-endian encoding" @@ -341,19 +364,19 @@ void init_circuit_add_classical_op( "\n:param args_in: input bits" "\n:param arg_out: output bit (distinct from input bits)" "\n:return: the new :py:class:`Circuit`", - py::arg("minval") = 0, - py::arg("maxval") = std::numeric_limits::max(), - py::arg("args_in"), py::arg("arg_out")) + py::arg("minval"), py::arg("maxval"), py::arg("args_in"), + py::arg("arg_out")) .def( "add_c_range_predicate", [](Circuit &circ, uint32_t a, uint32_t b, - const std::vector &args_in, Bit arg_out) { + const std::vector &args_in, Bit arg_out) -> Circuit & { unsigned n_args_in = args_in.size(); std::shared_ptr op = std::make_shared(n_args_in, a, b); std::vector args = args_in; args.push_back(arg_out); - return circ.add_op(op, args); + circ.add_op(op, args); + return circ; }, "Appends a range-predicate operation to the end of the circuit." 
"\n\n:param minval: lower bound of input in little-endian encoding" @@ -361,9 +384,8 @@ void init_circuit_add_classical_op( "\n:param args_in: input bits" "\n:param arg_out: output bit (distinct from input bits)" "\n:return: the new :py:class:`Circuit`", - py::arg("minval") = 0, - py::arg("maxval") = std::numeric_limits::max(), - py::arg("args_in"), py::arg("arg_out")) + py::arg("minval"), py::arg("maxval"), py::arg("args_in"), + py::arg("arg_out")) .def( "add_c_and_to_registers", [](Circuit &circ, const BitRegister ®0_in, @@ -442,7 +464,7 @@ void init_circuit_add_classical_op( .def( "add_c_not_to_registers", [](Circuit &circ, const BitRegister ®_in, - const BitRegister ®_out) { + const BitRegister ®_out) -> Circuit & { apply_classical_op_to_registers(circ, NotOp(), {reg_in, reg_out}); return circ; }, diff --git a/pytket/binders/circuit/Circuit/add_op.cpp b/pytket/binders/circuit/Circuit/add_op.cpp index ba5a4c7c62..58fd520f92 100644 --- a/pytket/binders/circuit/Circuit/add_op.cpp +++ b/pytket/binders/circuit/Circuit/add_op.cpp @@ -409,11 +409,11 @@ void init_circuit_add_op(py::class_> &c) { py::arg("expression"), py::arg("target")) .def( "add_custom_gate", - [](Circuit *circ, const composite_def_ptr_t &def, + [](Circuit *circ, const composite_def_ptr_t &definition, const std::vector ¶ms, const std::vector &qubits, const py::kwargs &kwargs) { return add_box_method( - circ, std::make_shared(def, params), qubits, + circ, std::make_shared(definition, params), qubits, kwargs); }, "Append an instance of a :py:class:`CustomGateDef` to the " @@ -422,7 +422,7 @@ void init_circuit_add_op(py::class_> &c) { "instantiate the gate with, in halfturns\n:param qubits: " "Indices of the qubits to append the box to" "\n:return: the new :py:class:`Circuit`", - py::arg("def"), py::arg("params"), py::arg("qubits")) + py::arg("definition"), py::arg("params"), py::arg("qubits")) .def( "add_barrier", [](Circuit *circ, const unit_vector_t &units) { @@ -538,11 +538,11 @@ void init_circuit_add_op(py::class_> &c) { py::arg("phasepolybox"), py::arg("qubits")) .def( "add_custom_gate", - [](Circuit *circ, const composite_def_ptr_t &def, + [](Circuit *circ, const composite_def_ptr_t &definition, const std::vector ¶ms, const qubit_vector_t &qubits, const py::kwargs &kwargs) { return add_box_method( - circ, std::make_shared(def, params), + circ, std::make_shared(definition, params), {qubits.begin(), qubits.end()}, kwargs); }, "Append an instance of a :py:class:`CustomGateDef` to the " @@ -551,13 +551,13 @@ void init_circuit_add_op(py::class_> &c) { "instantiate the gate with, in halfturns\n:param qubits: " "The qubits to append the box to" "\n:return: the new :py:class:`Circuit`", - py::arg("def"), py::arg("params"), py::arg("qubits")) + py::arg("definition"), py::arg("params"), py::arg("qubits")) .def( "add_assertion", [](Circuit *circ, const ProjectorAssertionBox &box, const std::vector &qubits, const std::optional &ancilla, - const std::optional &name) { + const std::optional &name) -> Circuit * { std::vector qubits_; for (unsigned i = 0; i < qubits.size(); ++i) { qubits_.push_back(Qubit(qubits[i])); @@ -568,7 +568,8 @@ void init_circuit_add_op(py::class_> &c) { } else { ancilla_ = Qubit(ancilla.value()); } - return circ->add_assertion(box, qubits_, ancilla_, name); + circ->add_assertion(box, qubits_, ancilla_, name); + return circ; }, "Append a :py:class:`ProjectorAssertionBox` to the circuit." 
"\n\n:param box: ProjectorAssertionBox to append" @@ -583,8 +584,9 @@ void init_circuit_add_op(py::class_> &c) { [](Circuit *circ, const ProjectorAssertionBox &box, const std::vector &qubits, const std::optional &ancilla, - const std::optional &name) { - return circ->add_assertion(box, qubits, ancilla, name); + const std::optional &name) -> Circuit * { + circ->add_assertion(box, qubits, ancilla, name); + return circ; }, "Append a :py:class:`ProjectorAssertionBox` to the circuit." "\n\n:param box: ProjectorAssertionBox to append" @@ -598,13 +600,14 @@ void init_circuit_add_op(py::class_> &c) { "add_assertion", [](Circuit *circ, const StabiliserAssertionBox &box, const std::vector &qubits, const unsigned &ancilla, - const std::optional &name) { + const std::optional &name) -> Circuit * { std::vector qubits_; for (unsigned i = 0; i < qubits.size(); ++i) { qubits_.push_back(Qubit(qubits[i])); } Qubit ancilla_(ancilla); - return circ->add_assertion(box, qubits_, ancilla_, name); + circ->add_assertion(box, qubits_, ancilla_, name); + return circ; }, "Append a :py:class:`StabiliserAssertionBox` to the circuit." "\n\n:param box: StabiliserAssertionBox to append" @@ -618,8 +621,9 @@ void init_circuit_add_op(py::class_> &c) { "add_assertion", [](Circuit *circ, const StabiliserAssertionBox &box, const std::vector &qubits, const Qubit &ancilla, - const std::optional &name) { - return circ->add_assertion(box, qubits, ancilla, name); + const std::optional &name) -> Circuit * { + circ->add_assertion(box, qubits, ancilla, name); + return circ; }, "Append a :py:class:`StabiliserAssertionBox` to the circuit." "\n\n:param box: StabiliserAssertionBox to append" diff --git a/pytket/binders/circuit/Circuit/main.cpp b/pytket/binders/circuit/Circuit/main.cpp index 1e90538ac1..3d12e2d523 100644 --- a/pytket/binders/circuit/Circuit/main.cpp +++ b/pytket/binders/circuit/Circuit/main.cpp @@ -26,8 +26,8 @@ #include "Circuit/Command.hpp" #include "Gate/OpPtrFunctions.hpp" #include "Gate/SymTable.hpp" +#include "Mapping/Verification.hpp" #include "Ops/Op.hpp" -#include "Routing/Verification.hpp" #include "Simulation/CircuitSimulator.hpp" #include "UnitRegister.hpp" #include "Utils/Json.hpp" @@ -211,6 +211,78 @@ void init_circuit(py::module &m) { "Adds BitRegister to Circuit" "\n\n:param register: BitRegister ", py::arg("register")) + .def( + "get_c_register", + [](Circuit &circ, const std::string &name) { + register_t reg = circ.get_reg(name); + if (reg.size() == 0 || + reg.begin()->second.type() != UnitType::Bit) { + throw CircuitInvalidity( + "Cannot find classical register with name \"" + name + "\"."); + } + return BitRegister(name, reg.size()); + }, + "Get the classical register with the given name.\n\n:param name: " + "name for the register\n:return: the retrieved " + ":py:class:`BitRegister`", + py::arg("name")) + .def_property_readonly( + "c_registers", + [](Circuit &circ) { + bit_vector_t all_bits = circ.all_bits(); + std::map bits_map; + std::vector b_regs; + for (Bit bit : all_bits) { + auto it = bits_map.find(bit.reg_name()); + if (it == bits_map.end()) { + bits_map.insert({bit.reg_name(), 1}); + } else { + it->second++; + } + } + for (auto const &it : bits_map) { + b_regs.push_back(BitRegister(it.first, it.second)); + } + return b_regs; + }, + "Get all classical registers.\n\n:return: List of " + ":py:class:`BitRegister`") + .def( + "get_q_register", + [](Circuit &circ, const std::string &name) { + register_t reg = circ.get_reg(name); + if (reg.size() == 0 || + reg.begin()->second.type() != UnitType::Qubit) 
{ + throw CircuitInvalidity( + "Cannot find quantum register with name \"" + name + "\"."); + } + return QubitRegister(name, reg.size()); + }, + "Get the quantum register with the given name.\n\n:param name: " + "name for the register\n:return: the retrieved " + ":py:class:`QubitRegister`", + py::arg("name")) + .def_property_readonly( + "q_registers", + [](Circuit &circ) { + qubit_vector_t all_qbs = circ.all_qubits(); + std::map qbs_map; + std::vector q_regs; + for (Qubit qb : all_qbs) { + auto it = qbs_map.find(qb.reg_name()); + if (it == qbs_map.end()) { + qbs_map.insert({qb.reg_name(), 1}); + } else { + it->second++; + } + } + for (auto const &it : qbs_map) { + q_regs.push_back(QubitRegister(it.first, it.second)); + } + return q_regs; + }, + "Get all quantum registers.\n\n:return: List of " + ":py:class:`QubitRegister`") .def( "add_qubit", &Circuit::add_qubit, "Constructs a single qubit with the given id.\n\n:param id: " @@ -247,6 +319,9 @@ void init_circuit(py::module &m) { "A qubit will feature in this map if it is " "measured and neither it nor the bit containing the " "measurement result is subsequently acted on") + .def_property_readonly( + "opgroups", &Circuit::get_opgroups, + "A set of all opgroup names in the circuit") .def( "flatten_registers", &Circuit::flatten_registers, "Combines all qubits into a single register namespace with " diff --git a/pytket/binders/circuit/boxes.cpp b/pytket/binders/circuit/boxes.cpp index 59802aa265..00ed6c6c92 100644 --- a/pytket/binders/circuit/boxes.cpp +++ b/pytket/binders/circuit/boxes.cpp @@ -158,7 +158,8 @@ void init_boxes(py::module &m) { .def_property_readonly( "name", &CompositeGateDef::get_name, "The readable name of the gate") .def_property_readonly( - "def", &CompositeGateDef::get_def, "Return definition as a circuit.") + "definition", &CompositeGateDef::get_def, + "Return definition as a circuit.") .def_property_readonly( "arity", &CompositeGateDef::n_args, "The number of real parameters for the gate"); diff --git a/pytket/binders/circuit/library.cpp b/pytket/binders/circuit/library.cpp new file mode 100644 index 0000000000..e548a4b7a3 --- /dev/null +++ b/pytket/binders/circuit/library.cpp @@ -0,0 +1,198 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
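
The new `get_c_register`/`get_q_register` methods and `c_registers`/`q_registers` properties, together with the `add_c_*` bindings now returning the `Circuit` itself, allow register lookups and chained classical operations. A minimal sketch, assuming the standard `pytket.circuit` import path and the default classical register for integer bit indices:

```python
# Minimal sketch using the standard pytket.circuit module; integer bit indices
# refer to the default classical register.
from pytket.circuit import Circuit

circ = Circuit()
circ.add_q_register("q", 2)
circ.add_c_register("c", 2)

assert circ.get_q_register("q").size == 2       # new named lookup
assert circ.get_c_register("c").size == 2
print(circ.q_registers, circ.c_registers)       # new list-of-register properties

# add_c_* methods now return the Circuit, so classical ops can be chained.
circ.add_c_setbits([True, False], [0, 1]).add_c_not(0, 1).add_c_and(0, 1, 1)
```
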
+ +#include + +#include "Circuit/CircPool.hpp" +#include "binder_utils.hpp" +#include "typecast.hpp" + +namespace py = pybind11; + +namespace tket { + +void init_library(py::module &m) { + /* Circuit library */ + py::module_ library_m = m.def_submodule( + "_library", + "Library of reusable circuits and circuit generator functions."); + library_m.def( + "_BRIDGE_using_CX_0", &CircPool::BRIDGE_using_CX_0, + "Equivalent to BRIDGE, using four CX, first CX has control on qubit 0"); + library_m.def( + "_BRIDGE_using_CX_1", &CircPool::BRIDGE_using_CX_1, + "Equivalent to BRIDGE, using four CX, first CX has control on qubit 1"); + library_m.def( + "_CX_using_flipped_CX", &CircPool::CX_using_flipped_CX, + "Equivalent to CX[0,1], using a CX[1,0] and four H gates"); + library_m.def( + "_CX_using_ECR", &CircPool::CX_using_ECR, + "Equivalent to CX, using only ECR, Rx and U3 gates"); + library_m.def( + "_CX_using_ZZMax", &CircPool::CX_using_ZZMax, + "Equivalent to CX, using only ZZMax, Rx and Rz gates"); + library_m.def( + "_CX_using_XXPhase_0", &CircPool::CX_using_XXPhase_0, + "Equivalent to CX, using only XXPhase, Rx, Ry and Rz gates"); + + library_m.def( + "_CX_using_XXPhase_1", &CircPool::CX_using_XXPhase_1, + "Equivalent to CX, using only XXPhase, Rx, Ry and Rz gates"); + library_m.def( + "_CX_VS_CX_reduced", &CircPool::CX_VS_CX_reduced, + "CX-reduced form of CX/V,S/CX"); + library_m.def( + "_CX_V_CX_reduced", &CircPool::CX_V_CX_reduced, + "CX-reduced form of CX/V,-/CX"); + library_m.def( + "_CX_S_CX_reduced", &CircPool::CX_S_CX_reduced, + "CX-reduced form of CX/-,S/CX (= ZZMax)"); + library_m.def( + "_CX_V_S_XC_reduced", &CircPool::CX_V_S_XC_reduced, + "CX-reduced form of CX/V,-/S,-/XC"); + library_m.def( + "_CX_S_V_XC_reduced", &CircPool::CX_S_V_XC_reduced, + "CX-reduced form of CX/-,S/-,V/XC"); + library_m.def( + "_CX_XC_reduced", &CircPool::CX_XC_reduced, "CX-reduced form of CX/XC"); + library_m.def( + "_SWAP_using_CX_0", &CircPool::SWAP_using_CX_0, + "Equivalent to SWAP, using three CX, outer CX have control on qubit 0"); + library_m.def( + "_SWAP_using_CX_1", &CircPool::SWAP_using_CX_1, + "Equivalent to SWAP, using three CX, outer CX have control on qubit 1"); + library_m.def( + "_two_Rz1", &CircPool::two_Rz1, + "A two-qubit circuit with an Rz(1) on each qubit"); + library_m.def("_X1_CX", &CircPool::X1_CX, "X[1]; CX[0,1]"); + library_m.def("_Z0_CX", &CircPool::Z0_CX, "Z[0]; CX[0,1] "); + + library_m.def( + "_CCX_modulo_phase_shift", &CircPool::CCX_modulo_phase_shift, + "Equivalent to CCX up to phase shift, using three CX. Warning: this is " + "not equivalent to CCX up to global phase so cannot be used as a direct " + "substitution except when the phase reversal can be cancelled. 
Its " + "unitary representation is like CCX but with a -1 at the (5,5) " + "position."); + library_m.def( + "_CCX_normal_decomp", &CircPool::CCX_normal_decomp, + "Equivalent to CCX, using five CX"); + library_m.def( + "_C3X_normal_decomp", &CircPool::C3X_normal_decomp, + "Equivalent to CCCX, using 14 CX"); + library_m.def( + "_C4X_normal_decomp", &CircPool::C4X_normal_decomp, + "Equivalent to CCCCX, using 36 CX "); + library_m.def( + "_ladder_down", &CircPool::ladder_down, "CX[0,1]; CX[2,0]; CCX[0,1,2]"); + library_m.def( + "_ladder_down_2", &CircPool::ladder_down_2, + "CX[0,1]; X[0]; X[2]; CCX[0,1,2]"); + library_m.def( + "_ladder_up", &CircPool::ladder_up, "CCX[0,1,2]; CX[2,0]; CX[2,1]"); + library_m.def("_X", &CircPool::X, "Just an X gate"); + library_m.def("_CX", &CircPool::CX, "Just a CX[0,1] gate"); + library_m.def("_CCX", &CircPool::CCX, "Just a CCX[0,1,2] gate"); + library_m.def("_BRIDGE", &CircPool::BRIDGE, "Just a BRIDGE[0,1,2] gate"); + library_m.def("_H_CZ_H", &CircPool::H_CZ_H, "H[1]; CZ[0,1]; H[1] "); + library_m.def( + "_CZ_using_CX", &CircPool::CZ_using_CX, + "Equivalent to CZ, using CX and single-qubit gates"); + library_m.def( + "_CY_using_CX", &CircPool::CY_using_CX, + "Equivalent to CY, using CX and single-qubit gates"); + library_m.def( + "_CH_using_CX", &CircPool::CH_using_CX, + "Equivalent to CH, using CX and single-qubit gates"); + library_m.def( + "_CV_using_CX", &CircPool::CV_using_CX, + "Equivalent to CV, using CX and single-qubit gates "); + library_m.def( + "_CVdg_using_CX", &CircPool::CVdg_using_CX, + "Equivalent to CVdg, using CX and single-qubit gates"); + library_m.def( + "_CSX_using_CX", &CircPool::CSX_using_CX, + "Equivalent to CSX, using CX and single-qubit gates"); + library_m.def( + "_CSXdg_using_CX", &CircPool::CSXdg_using_CX, + "Equivalent to CSXdg, using CX and single-qubit gates"); + library_m.def( + "_CSWAP_using_CX", &CircPool::CSWAP_using_CX, + "Equivalent to CSWAP, using CX and single-qubit gates "); + library_m.def( + "_ECR_using_CX", &CircPool::ECR_using_CX, + "Equivalent to ECR, using CX, Rx and U3 gates "); + library_m.def( + "_ZZMax_using_CX", &CircPool::ZZMax_using_CX, + "Equivalent to ZZMax, using CX, Rz and U3 gates "); + library_m.def( + "_CRz_using_CX", &CircPool::CRz_using_CX, + "Equivalent to CRz, using CX and Rz gates"); + library_m.def( + "_CRx_using_CX", &CircPool::CRx_using_CX, + "Equivalent to CRx, using CX, H and Rx gates"); + library_m.def( + "_CRy_using_CX", &CircPool::CRy_using_CX, + "Equivalent to CRy, using CX and Ry gates"); + library_m.def( + "_CU1_using_CX", &CircPool::CU1_using_CX, + "Equivalent to CU1, using CX and U1 gates"); + library_m.def( + "_CU3_using_CX", &CircPool::CU3_using_CX, + "Equivalent to CU1, using CX, U1 and U3 gates"); + library_m.def( + "_ISWAP_using_CX", &CircPool::ISWAP_using_CX, + "Equivalent to ISWAP, using CX, U3 and Rz gates"); + library_m.def( + "_XXPhase_using_CX", &CircPool::XXPhase_using_CX, + "Equivalent to XXPhase, using CX and U3 gates "); + library_m.def( + "_YYPhase_using_CX", &CircPool::YYPhase_using_CX, + "Equivalent to YYPhase, using CX, Rz and U3 gates"); + library_m.def( + "_ZZPhase_using_CX", &CircPool::ZZPhase_using_CX, + "Equivalent to ZZPhase, using CX and Rz gates"); + library_m.def( + "_XXPhase3_using_CX", &CircPool::XXPhase3_using_CX, + "Equivalent to 3-qubit MS interaction, using CX and U3 gates"); + library_m.def( + "_ESWAP_using_CX", &CircPool::XXPhase3_using_CX, + "Equivalent to ESWAP, using CX, X, S, Ry and U1 gates"); + library_m.def( + "_FSim_using_CX", 
&CircPool::FSim_using_CX, + "Equivalent to Fsim, using CX, X, S, U1 and U3 gates "); + library_m.def( + "_PhasedISWAP_using_CX", &CircPool::PhasedISWAP_using_CX, + "Equivalent to PhasedISWAP, using CX, U3 and Rz gates"); + library_m.def( + "_NPhasedX_using_CX", &CircPool::NPhasedX_using_CX, + "Unwrap NPhasedX, into number_of_qubits PhasedX gates"); + + library_m.def( + "_TK1_to_PhasedXRz", &CircPool::tk1_to_PhasedXRz, + "A tk1 equivalent circuit given tk1 parameters in terms of PhasedX, Rz"); + library_m.def( + "_TK1_to_RzRx", &CircPool::tk1_to_rzrx, + "A tk1 equivalent circuit given tk1 parameters in terms of Rz, Rx"); + library_m.def( + "_TK1_to_RzH", &CircPool::tk1_to_rzh, + "A tk1 equivalent circuit given tk1 parameters in terms of Rz, H"); + library_m.def( + "_TK1_to_RzSX", &CircPool::tk1_to_rzsx, + "A tk1 equivalent circuit given tk1 parameters in terms of Rz, Sx"); + library_m.def( + "_TK1_to_TK1", &CircPool::tk1_to_tk1, + "A circuit of a single tk1 gate with given parameters"); +} +} // namespace tket diff --git a/pytket/binders/circuit/main.cpp b/pytket/binders/circuit/main.cpp index f97a138bb8..5deec9eac6 100644 --- a/pytket/binders/circuit/main.cpp +++ b/pytket/binders/circuit/main.cpp @@ -26,10 +26,12 @@ #include "Ops/Op.hpp" #include "Utils/Constants.hpp" #include "Utils/Symbols.hpp" +#include "binder_json.hpp" #include "binder_utils.hpp" #include "typecast.hpp" namespace py = pybind11; +using json = nlohmann::json; namespace tket { @@ -37,6 +39,7 @@ void init_unitid(py::module &m); void init_circuit(py::module &m); void init_classical(py::module &m); void init_boxes(py::module &m); +void init_library(py::module &m); PYBIND11_MODULE(circuit, m) { init_unitid(m); @@ -443,7 +446,10 @@ PYBIND11_MODULE(circuit, m) { "A classical operation applied to multiple bits simultaneously") .value( "ClassicalExpBox", OpType::ClassicalExpBox, - "A box for holding compound classical operations on Bits."); + "A box for holding compound classical operations on Bits.") + .def_static( + "from_name", [](const json &j) { return j.get(); }, + "Construct from name"); py::enum_( m, "BasisOrder", "Enum for readout basis and ordering.\n" @@ -494,9 +500,10 @@ PYBIND11_MODULE(circuit, m) { [](const Command &com) { return com.get_op_ptr()->free_symbols(); }, ":return: set of symbolic parameters for the command"); - init_circuit(m); + init_library(m); init_boxes(m); init_classical(m); + init_circuit(m); m.def( "fresh_symbol", &SymTable::fresh_symbol, diff --git a/pytket/binders/mapping.cpp b/pytket/binders/mapping.cpp new file mode 100644 index 0000000000..139375e780 --- /dev/null +++ b/pytket/binders/mapping.cpp @@ -0,0 +1,158 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
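
Alongside the `_library` submodule of `CircPool` circuit generators, `OpType` gains a static `from_name` constructor that deserializes an op type from its name string (the same form used in circuit JSON). A small illustrative example, assuming the usual `pytket.circuit` exports:

```python
# Assumes the usual pytket.circuit exports; from_name takes the op's name string.
from pytket.circuit import Circuit, OpType

assert OpType.from_name("CX") == OpType.CX

circ = Circuit(2).H(0).CX(0, 1)
print(circ.n_gates_of_type(OpType.from_name("CX")))   # 1
```
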
+ +#include +#include +#include +#include +#include + +#include "Circuit/Circuit.hpp" +#include "Mapping/AASLabelling.hpp" +#include "Mapping/AASRoute.hpp" +#include "Mapping/BoxDecomposition.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/MultiGateReorder.hpp" +#include "Mapping/RoutingMethodCircuit.hpp" +#include "binder_utils.hpp" + +namespace py = pybind11; + +namespace tket { +PYBIND11_MODULE(mapping, m) { + py::class_>( + m, "RoutingMethod", + "Parent class for RoutingMethod, for inheritance purposes only, not for " + "usage.") + .def(py::init<>()); + + py::class_< + RoutingMethodCircuit, std::shared_ptr, + RoutingMethod>( + m, "RoutingMethodCircuit", + "The RoutingMethod class captures a method for partially mapping logical " + "subcircuits to physical operations as permitted by some architecture. " + "Ranked RoutingMethod objects are used by the MappingManager to route " + "whole circuits.") + .def( + py::init< + const std::function< + std::tuple( + const Circuit&, const ArchitecturePtr&)>&, + unsigned, unsigned>(), + "Constructor for a routing method defined by partially routing " + "subcircuits.\n\n:param route_subcircuit: A function declaration " + "that given a Circuit and Architecture object, returns a tuple " + "containing a bool informing MappingManager whether to substitute " + "the returned circuit into the circuit being routed, " + "a new modified circuit, the initial logical to physical " + "qubit mapping of the modified circuit and the permutation of " + "logical to physical qubit mapping given operations in the " + "modified circuit\n:param max_size: The maximum number of gates " + "permitted in a subcircuit\n:param max_depth: The maximum permitted " + "depth of a subcircuit.", + py::arg("route_subcircuit"), py::arg("max_size"), + py::arg("max_depth")); + + py::class_< + LexiRouteRoutingMethod, std::shared_ptr, + RoutingMethod>( + m, "LexiRouteRoutingMethod", + "Defines a RoutingMethod object for mapping circuits that uses the " + "Lexicographical Comparison approach outlined in arXiv:1902.08091." + "Only supports 1-qubit, 2-qubit and barrier gates.") + .def( + py::init(), + "LexiRoute constructor.\n\n:param lookahead: Maximum depth of " + "lookahead employed when picking SWAP for purpose of logical to " + "physical mapping.", + py::arg("lookahead") = 10); + + py::class_< + AASRouteRoutingMethod, std::shared_ptr, + RoutingMethod>( + m, "AASRouteRoutingMethod", + "Defines a RoutingMethod object for mapping circuits that uses the " + "architecture aware synthesis method implemented in tket.") + .def( + py::init(), + "AASRouteRoutingMethod constructor.\n\n:param aaslookahead: " + "recursive interation depth of the architecture aware synthesis." 
+ "method.", + py::arg("aaslookahead")); + + py::class_< + AASLabellingMethod, std::shared_ptr, RoutingMethod>( + m, "AASLabellingMethod", + "Defines a Labeling Method for aas for labelling all unplaced qubits in " + "a circuit") + .def(py::init<>(), "AASLabellingMethod constructor."); + + py::class_< + LexiLabellingMethod, std::shared_ptr, RoutingMethod>( + m, "LexiLabellingMethod", + "Defines a RoutingMethod for labelling Qubits that uses the " + "Lexicographical Comparison approach outlined in arXiv:1902.08091.") + .def(py::init<>(), "LexiLabellingMethod constructor."); + + py::class_< + MultiGateReorderRoutingMethod, + std::shared_ptr, RoutingMethod>( + m, "MultiGateReorderRoutingMethod", + "Defines a RoutingMethod object for commuting physically permitted " + "multi-qubit gates to the front of the subcircuit.") + .def( + py::init(), + "MultiGateReorderRoutingMethod constructor.\n\n:param max_depth: " + "Maximum number of layers of gates checked for simultaneous " + "commutation. " + "\n:param max_size: Maximum number of gates checked for simultaneous " + "commutation.", + py::arg("max_depth") = 10, py::arg("max_size") = 10); + + py::class_< + BoxDecompositionRoutingMethod, + std::shared_ptr, RoutingMethod>( + m, "BoxDecompositionRoutingMethod", + "Defines a RoutingMethod object for decomposing boxes.") + .def(py::init<>(), "BoxDecompositionRoutingMethod constructor."); + + py::class_( + m, "MappingManager", + "Defined by a pytket Architecture object, maps Circuit logical qubits " + "to physically permitted Architecture qubits. Mapping is completed by " + "sequential routing (full or partial) of subcircuits. A custom method " + "for routing (full or partial) of subcircuits can be defined in Python.") + .def( + py::init(), + "MappingManager constructor.\n\n:param architecture: pytket " + "Architecture object.", + py::arg("architecture")) + .def( + "route_circuit", &MappingManager::route_circuit, + "Maps from given logical circuit to physical circuit. Modification " + "defined by route_subcircuit, but typically this proceeds by " + "insertion of SWAP gates that permute logical qubits on physical " + "qubits.\n\n:param circuit: pytket circuit to be mapped" + "\n:param routing_methods: Ranked methods to use for routing " + "subcircuits. In given order, each method is sequentially checked " + "for viability, with the first viable method being used." 
+ "\n:param label_isolated_qubits: will not label qubits without gates " + "or only single qubit gates on them if this is set false", + py::arg("circuit"), py::arg("routing_methods"), + py::arg("label_isolated_qubits") = true); +} +} // namespace tket \ No newline at end of file diff --git a/pytket/binders/passes.cpp b/pytket/binders/passes.cpp index 2369a93aba..67e85ed87f 100644 --- a/pytket/binders/passes.cpp +++ b/pytket/binders/passes.cpp @@ -15,6 +15,9 @@ #include #include "ArchAwareSynth/SteinerForest.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/PassLibrary.hpp" @@ -30,34 +33,29 @@ using json = nlohmann::json; namespace tket { -void update_routing_config(RoutingConfig &config, py::kwargs kwargs) { - if (kwargs.contains("swap_lookahead")) - config.depth_limit = py::cast(kwargs["swap_lookahead"]); - if (kwargs.contains("bridge_lookahead")) - config.distrib_limit = py::cast(kwargs["bridge_lookahead"]); - if (kwargs.contains("bridge_interactions")) - config.interactions_limit = - py::cast(kwargs["bridge_interactions"]); - if (kwargs.contains("bridge_exponent")) - config.distrib_exponent = py::cast(kwargs["bridge_exponent"]); -} static PassPtr gen_cx_mapping_pass_kwargs( const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { - RoutingConfig config = {}; - update_routing_config(config, kwargs); + std::vector config = { + std::make_shared(), + std::make_shared()}; + if (kwargs.contains("config")) { + config = py::cast>(kwargs["config"]); + } bool directed_cx = false; - if (kwargs.contains("directed_cx")) + if (kwargs.contains("directed_cx")) { directed_cx = py::cast(kwargs["directed_cx"]); + } bool delay_measures = true; - if (kwargs.contains("delay_measures")) + if (kwargs.contains("delay_measures")) { delay_measures = py::cast(kwargs["delay_measures"]); + } return gen_cx_mapping_pass(arc, placer, config, directed_cx, delay_measures); } -static PassPtr gen_default_routing_pass( - const Architecture &arc, py::kwargs kwargs) { - RoutingConfig config = {}; - update_routing_config(config, kwargs); +static PassPtr gen_default_routing_pass(const Architecture &arc) { + std::vector config = { + std::make_shared(), + std::make_shared()}; return gen_routing_pass(arc, config); } @@ -80,13 +78,6 @@ static PassPtr gen_default_aas_routing_pass( return gen_full_mapping_pass_phase_poly(arc, lookahead, cnotsynthtype); } -static PassPtr gen_full_mapping_pass_kwargs( - const Architecture &arc, const PlacementPtr &placer, py::kwargs kwargs) { - RoutingConfig config = {}; - update_routing_config(config, kwargs); - return gen_full_mapping_pass(arc, placer, config); -} - const PassPtr &DecomposeClassicalExp() { // a special box decomposer for Circuits containing // ClassicalExpBox @@ -373,23 +364,7 @@ PYBIND11_MODULE(passes, m) { "gates." 
"\n\n:param allow_swaps: whether to allow implicit wire swaps", py::arg("allow_swaps") = true); - m.def("RebaseCirq", &RebaseCirq, "Converts all gates to CZ, PhasedX and Rz."); - m.def( - "RebaseHQS", &RebaseHQS, "Converts all gates to ZZMax, PhasedX and Rz."); - m.def( - "RebaseProjectQ", &RebaseProjectQ, - "Converts all gates to SWAP, CRz, CX, CZ, H, X, Y, Z, S, T, V, Rx, " - "Ry and Rz."); - m.def( - "RebasePyZX", &RebasePyZX, - "Converts all gates to SWAP, CX, CZ, H, X, Z, S, T, Rx and Rz."); - m.def("RebaseQuil", &RebaseQuil, "Converts all gates to CZ, Rx and Rz."); m.def("RebaseTket", &RebaseTket, "Converts all gates to CX and TK1."); - m.def( - "RebaseUMD", &RebaseUMD, - "Converts all gates to XXPhase, PhasedX and Rz."); - m.def("RebaseUFR", &RebaseUFR, "Converts all gates to CX, Rz and H."); - m.def("RebaseOQC", &RebaseOQC, "Converts all gates to ECR, Rz and SX."); m.def( "RemoveRedundancies", &RemoveRedundancies, "Removes gate-inverse pairs, merges rotations, removes identity " @@ -413,9 +388,6 @@ PYBIND11_MODULE(passes, m) { m.def( "SquashTK1", &SquashTK1, "Squash sequences of single-qubit gates to TK1 gates."); - m.def( - "SquashHQS", &SquashHQS, - "Squash Rz and PhasedX gate sequences into an optimal form."); m.def( "FlattenRegisters", &FlattenRegisters, "Merges all quantum and classical registers into their " @@ -459,25 +431,24 @@ PYBIND11_MODULE(passes, m) { m.def( "RebaseCustom", &gen_rebase_pass, "Construct a custom rebase pass. This pass:\n(1) decomposes " - "multi-qubit gates not in the set of gate types `multiqs` to CX " - "gates;\n(2) if CX is not in `multiqs`, replaces CX gates with " + "multi-qubit gates not in the set of gate types `gateset` to CX " + "gates;\n(2) if CX is not in `gateset`, replaces CX gates with " "`cx_replacement`;\n(3) converts any single-qubit gates not in the " - "gate type set `singleqs` to the form " + "gate type set to the form " ":math:`\\mathrm{Rz}(a)\\mathrm{Rx}(b)\\mathrm{Rz}(c)` (in " "matrix-multiplication order, i.e. reverse order in the " "circuit);\n(4) applies the `tk1_replacement` function to each of " "these triples :math:`(a,b,c)` to generate replacement circuits." - "\n\n:param multiqs: The allowed multi-qubit operations in the " + "\n\n:param gateset: The allowed multi-qubit operations in the " "rebased circuit." "\n:param cx_replacement: The equivalent circuit to replace a CX " - "gate in the desired basis." - "\n:param singleqs: The allowed single-qubit operations in the " - "rebased circuit." + "gate using two qubit gates from the desired basis (can use any single " + "qubit OpTypes)." "\n:param tk1_replacement: A function which, given the parameters of " "an Rz(a)Rx(b)Rz(c) triple, returns an equivalent circuit in the " "desired basis." "\n:return: a pass that rebases to the given gate set", - py::arg("multiqs"), py::arg("cx_replacement"), py::arg("singleqs"), + py::arg("gateset"), py::arg("cx_replacement"), py::arg("tk1_replacement")); m.def( @@ -500,10 +471,6 @@ PYBIND11_MODULE(passes, m) { "RoutingPass", &gen_default_routing_pass, "Construct a pass to route to the connectivity graph of an " ":py:class:`Architecture`. Edge direction is ignored." - "\n\n:param arc: The architecture to use for connectivity information." - "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0." 
"\n:return: a pass that routes to the given device architecture", py::arg("arc")); @@ -514,24 +481,30 @@ PYBIND11_MODULE(passes, m) { ":py:class:`Architecture` Nodes", py::arg("placer")); + m.def( + "NaivePlacementPass", &gen_naive_placement_pass, + ":param architecture: The Architecture used for relabelling." + "\n:return: a pass to relabel :py:class:`Circuit` Qubits to " + ":py:class:`Architecture` Nodes", + py::arg("arc")); + m.def( "RenameQubitsPass", &gen_rename_qubits_pass, "Rename some or all qubits.", "\n\n:param qubit_map: map from old to new qubit names", py::arg("qubit_map")); m.def( - "FullMappingPass", &gen_full_mapping_pass_kwargs, + "FullMappingPass", &gen_full_mapping_pass, "Construct a pass to relabel :py:class:`Circuit` Qubits to " ":py:class:`Architecture` Nodes, and then route to the connectivity " "graph " "of an :py:class:`Architecture`. Edge direction is ignored." "\n\n:param arc: The architecture to use for connectivity information. " "\n:param placer: The Placement used for relabelling." - "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0." + "\n:param config: Parameters for routing, a list of RoutingMethod, each " + "method is checked and run if applicable in turn." "\n:return: a pass to perform the remapping", - py::arg("arc"), py::arg("placer")); + py::arg("arc"), py::arg("placer"), py::arg("config")); m.def( "DefaultMappingPass", &gen_default_mapping_pass, @@ -542,8 +515,10 @@ PYBIND11_MODULE(passes, m) { "Placement used " "is GraphPlacement." "\n\n:param arc: The Architecture used for connectivity information." + "\n:param delay_measures: Whether to commute measurements to the end " + "of the circuit, defaulting to true." "\n:return: a pass to perform the remapping", - py::arg("arc")); + py::arg("arc"), py::arg("delay_measures") = true); m.def( "AASRouting", &gen_default_aas_routing_pass, @@ -577,8 +552,6 @@ PYBIND11_MODULE(passes, m) { "\n\n:param arc: The Architecture used for connectivity information." "\n:param placer: The placement used for relabelling." "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0, " "(bool)directed_cx=false, (bool)delay_measures=true" "\n:return: a pass to perform the remapping", py::arg("arc"), py::arg("placer")); diff --git a/pytket/binders/pauli.cpp b/pytket/binders/pauli.cpp index 47e0cbc103..0b79d70098 100644 --- a/pytket/binders/pauli.cpp +++ b/pytket/binders/pauli.cpp @@ -60,14 +60,6 @@ PYBIND11_MODULE(pauli, m) { .def("__lt__", &QubitPauliString::operator<) .def("__getitem__", &QubitPauliString::get) .def("__setitem__", &QubitPauliString::set) - .def( - "to_dict", - [](const QubitPauliString &qps) { - PyErr_WarnEx( - PyExc_DeprecationWarning, - "to_dict() is deprecated, use the map property instead.", 1); - return qps.map; - }) .def_property_readonly( "map", [](const QubitPauliString &qps) { return qps.map; }, ":return: the QubitPauliString's underlying dict mapping " diff --git a/pytket/binders/routing.cpp b/pytket/binders/placement.cpp similarity index 50% rename from pytket/binders/routing.cpp rename to pytket/binders/placement.cpp index 0b28065942..1eaf602092 100644 --- a/pytket/binders/routing.cpp +++ b/pytket/binders/placement.cpp @@ -12,16 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "Routing/Routing.hpp" +#include "Placement/Placement.hpp" #include #include #include #include -#include "Architecture/Architecture.hpp" -#include "Circuit/Circuit.hpp" -#include "Transformations/Transform.hpp" #include "Utils/Json.hpp" #include "binder_json.hpp" #include "binder_utils.hpp" @@ -32,7 +29,6 @@ using json = nlohmann::json; namespace tket { -// definitely a better way of doing this ... void amend_config_from_kwargs(NoiseAwarePlacement &pobj, py::kwargs kwargs) { PlacementConfig config_ = pobj.get_config(); @@ -75,159 +71,7 @@ void place_with_map(Circuit &circ, qubit_mapping_t &qmap) { plobj.place_with_map(circ, qmap); } -std::pair route( - const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { - RoutingConfig config = {}; - if (kwargs.contains("swap_lookahead")) - config.depth_limit = py::cast(kwargs["swap_lookahead"]); - if (kwargs.contains("bridge_lookahead")) - config.distrib_limit = py::cast(kwargs["bridge_lookahead"]); - if (kwargs.contains("bridge_interactions")) - config.interactions_limit = - py::cast(kwargs["bridge_interactions"]); - if (kwargs.contains("bridge_exponent")) - config.distrib_exponent = py::cast(kwargs["bridge_exponent"]); - - Routing router(circuit, arc); - Circuit out = router.solve(config).first; - return {out, router.return_final_map()}; -} - -PYBIND11_MODULE(routing, m) { - py::class_>( - m, "NodeGraph", - "Abstract class for describing a device connectivity graph."); - - py::class_>( - m, "Architecture", - "Class describing the connectivity of qubits on a general device.") - .def( - py::init([](const std::vector> - &connections) { return Architecture(connections); }), - "The constructor for an architecture with connectivity " - "between qubits.\n\n:param connections: A list of pairs " - "representing qubit indices that can perform two-qubit " - "operations", - py::arg("connections")) - .def( - py::init>>(), - "The constructor for an architecture with connectivity " - "between qubits.\n\n:param connections: A list of pairs " - "representing Nodes that can perform two-qubit operations", - py::arg("connections")) - .def_property_readonly( - "nodes", &Architecture::get_all_nodes_vec, - "Returns all nodes of architecture as Node objects.") - .def_property_readonly( - "coupling", &Architecture::get_all_edges_vec, - "Returns the coupling map of the Architecture as " - "UnitIDs. ") - .def( - "to_dict", [](const Architecture &arch) { return json(arch); }, - "Return a JSON serializable dict representation of " - "the Architecture.\n" - ":return: dict containing nodes and links.") - .def_static( - "from_dict", [](const json &j) { return j.get(); }, - "Construct Architecture instance from JSON serializable " - "dict representation of the Architecture.") - // as far as Python is concerned, Architectures are immutable - .def( - "__deepcopy__", - [](const Architecture &arc, py::dict = py::dict()) { return arc; }) - .def( - "__repr__", - [](const Architecture &arc) { - return ""; - }) - .def(py::self == py::self); - py::class_>( - m, "SquareGrid", - "Architecture class for qubits arranged in a square lattice of " - "given number of rows and columns. Qubits are arranged with qubits " - "values increasing first along rows then along columns i.e. 
for a " - "3 x 3 grid:\n\n 0 1 2\n\n 3 4 5\n\n 6 7 8") - .def( - py::init(), - "The constructor for a Square Grid architecture with some " - "undirected connectivity between qubits.\n\n:param n_rows: " - "The number of rows in the grid\n:param n_columns: The number " - "of columns in the grid", - py::arg("n_rows"), py::arg("n_columns")) - .def( - py::init(), - "The constructor for a Square Grid architecture with some " - "undirected connectivity between qubits.\n\n:param n_rows: " - "The number of rows in the grid\n:param n_columns: The number " - "of columns in the grid\n:param n_layers: The number of " - "layers of grids", - py::arg("n_rows"), py::arg("n_columns"), py::arg("n_layers")) - .def( - "squind_to_qind", - [](const SquareGrid &self, const unsigned row, const unsigned col) { - return self.squind_to_qind(row, col); - }, - "Converts a (row,column) index for a square grid to a " - "single " - "qubit index\n\n:param row: The given row index\n:param " - "column: The given column index\n:return: the " - "corresponding " - "global qubit index", - py::arg("row"), py::arg("column")) - .def( - "qind_to_squind", &SquareGrid::qind_to_squind, - "Converts a single qubit index to a (row,column) index for a " - "square grid.\n\n:param index: The global qubit " - "index\n:return: the corresponding grid index as a pair " - "(row,column)", - py::arg("index")) - // as far as Python is concerned, Architectures are immutable - .def( - "__deepcopy__", - [](const SquareGrid &arc, py::dict = py::dict()) { return arc; }) - .def("__repr__", [](const SquareGrid &arc) { - return ""; - }); - py::class_>( - m, "RingArch", - "Architecture class for number of qubits arranged in a ring.") - .def( - py::init(), - "The constructor for a RingArchitecture with some undirected " - "connectivity between qubits.\n\n:param number of qubits", - py::arg("nodes")) - .def("__repr__", [](const RingArch &arc) { - return ""; - }); - py::class_>( - m, "FullyConnected", - "An architecture with full connectivity between qubits.") - .def( - py::init(), - "Construct a fully-connected architecture." - "\n\n:param n: number of qubits", - py::arg("n")) - .def( - "__repr__", - [](const FullyConnected &arc) { - return ""; - }) - .def(py::self == py::self) - .def_property_readonly( - "nodes", &FullyConnected::get_all_nodes_vec, - "All nodes of the architecture as :py:class:`Node` objects.") - .def( - "to_dict", [](const FullyConnected &arch) { return json(arch); }, - "JSON-serializable dict representation of the architecture." - "\n\n:return: dict containing nodes") - .def_static( - "from_dict", [](const json &j) { return j.get(); }, - "Construct FullyConnected instance from dict representation."); - +PYBIND11_MODULE(placement, m) { py::class_>( m, "Placement", "The base Placement class, contains methods for getting maps " @@ -241,17 +85,23 @@ PYBIND11_MODULE(routing, m) { py::arg("arc")) .def("__repr__", [](const Placement &) { return ""; }) - .def("place", &Placement::place, - "Relabels Circuit Qubits to Architecture Nodes and 'unplaced'. For " - "base Placement, all Qubits and labelled 'unplaced'. " - "\n\n:param circuit: The Circuit being relabelled.", - py::arg("circuit")) + .def("place", + [](const Placement &placement, Circuit &circ) { + return placement.place(circ); + }, + "Relabels Circuit Qubits to Architecture Nodes and 'unplaced'. For " + "base Placement, all Qubits and labelled 'unplaced'. 
" + "\n\n:param circuit: The Circuit being relabelled.", + py::arg("circuit")) .def_static( - "place_with_map", &Placement::place_with_map, - "Relabels Circuit Qubits to Architecture Nodes using given map. " - "\n\n:param circuit: The circuit being relabelled\n:param " - "qmap: The map from logical to physical qubits to apply.", - py::arg("circuit"), py::arg("qmap")) + "place_with_map", + [](Circuit &circ, qubit_mapping_t& qmap) { + return Placement::place_with_map(circ, qmap); + }, + "Relabels Circuit Qubits to Architecture Nodes using given map. " + "\n\n:param circuit: The circuit being relabelled\n:param " + "qmap: The map from logical to physical qubits to apply.", + py::arg("circuit"), py::arg("qmap")) .def("get_placement_map", &Placement::get_placement_map, "Returns a map from logical to physical qubits that is Architecture " "appropriate for the given Circuit. " @@ -370,26 +220,5 @@ PYBIND11_MODULE(routing, m) { "\n\n:param circuit: The Circuit being relabelled. \n:param qmap: " "The map from logical to physical qubits to apply.", py::arg("circuit"), py::arg("qmap")); - - m.def( - "route", - [](const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { - return route(circuit, arc, kwargs).first; - }, - "Routes the circuit subject to the connectivity of the input " - "architecture, given configuration settings." - "\n\n:param circuit: The circuit to be routed." - "\n:param architecture: A representation of the qubit connectivity " - "constraints of the device." - "\n:param \\**kwargs: Parameters for routing: " - "(int)swap_lookahead=50, (int)bridge_lookahead=4, " - "(int)bridge_interactions=2, (float)bridge_exponent=0, " - "\n:return: the routed :py:class:`Circuit`", - py::arg("circuit"), py::arg("architecture")); - m.def( - "_route_return_map", - [](const Circuit &circuit, const Architecture &arc, py::kwargs kwargs) { - return route(circuit, arc, kwargs); - }); } } // namespace tket diff --git a/pytket/binders/program.cpp b/pytket/binders/program.cpp deleted file mode 100644 index 86971cda56..0000000000 --- a/pytket/binders/program.cpp +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Program/Program.hpp" - -#include -#include -#include - -#include "typecast.hpp" - -namespace py = pybind11; - -namespace tket { - -template -static Program *add_gate_method( - Program *prog, OpType type, const std::vector ¶ms, - const std::vector &args, const py::kwargs &kwargs) { - if (kwargs.contains("condition_bits")) { - std::vector bits = py::cast>(kwargs["condition_bits"]); - unsigned n_args = args.size(), n_bits = bits.size(); - unsigned value = kwargs.contains("condition_value") - ? 
py::cast(kwargs["condition_value"]) - : (1u << n_bits) - 1; - Op_ptr con = std::make_shared( - get_op_ptr(type, params, n_args), n_bits, value); - std::vector new_args = bits; - new_args.insert(new_args.end(), args.begin(), args.end()); - prog->add_op(con, new_args); - } else { - prog->add_op(type, params, args); - } - return prog; -} - -PYBIND11_MODULE(program, m) { - py::class_( - m, "Program", - "Encapsulates a control flow graph for a quantum program. Each " - "basic block is a single quantum circuit which may include " - "classical instructions and OpenQASM-style conditional gates. " - "Branches are always made using a single condition bit. Allows " - "long sequences of operations to be applied conditionally or " - "repeatedly while some bit is true.") - .def(py::init<>(), "Constructs an empty program.") - .def( - py::init(), - "Constructs a program with a given number of quantum and " - "classical bits\n\n:param n_qubits: The number of qubits in " - "the program\n:param c_bits: The number of classical bits in " - "the program", - py::arg("n_qubits"), py::arg("n_bits") = 0) - .def("__str__", [](const Program &) { return ""; }) - .def( - "__repr__", - [](const Program &prog) { - std::stringstream ss; - ss << "["; - for (auto com : prog) { - ss << com.to_str() << " "; - } - ss << "]"; - return ss.str(); - }) - .def( - "__iter__", - [](const Program &prog) { - return py::make_iterator(prog.begin(), prog.end()); - }, - "Iterate through the program, a Command at a time.", - py::keep_alive< - 0, 1>() /* Essential: keep object alive while iterator exists */) - .def( - "add_q_register", &Program::add_q_register, - "Constructs a new quantum register with a given name and " - "number of qubits.\n\n:param name: Unique readable name for " - "the register\n:param size: Number of qubits " - "required\n:return: a map from index to the corresponding " - "UnitIDs", - py::arg("name"), py::arg("size")) - .def( - "add_c_register", &Program::add_c_register, - "Constructs a new classical register with a given name and " - "number of bits.\n\n:param name: Unique readable name for the " - "register\n:param size: Number of bits required\n:return: a " - "map from index to the corresponding UnitIDs", - py::arg("name"), py::arg("size")) - .def( - "add_qubit", &Program::add_qubit, - "Constructs a single qubit with the given id.\n\n:param id: " - "Unique id for the qubit\n:param reject_dups: Fail if there " - "is already a qubit in this program with the id. Default to " - "True", - py::arg("id"), py::arg("reject_dups") = true) - .def( - "add_bit", &Program::add_bit, - "Constructs a single bit with the given id.\n\n:param id: " - "Unique id for the bit\n:param reject_dups: Fail if there is " - "already a bit in this program with the id. 
Default to True", - py::arg("id"), py::arg("reject_dups") = true) - .def_property_readonly( - "qubits", &Program::all_qubits, - "A list of all qubit ids in the program") - .def_property_readonly( - "bits", &Program::all_bits, - "A list of all classical bit ids in the program") - .def_property_readonly( - "bit_readout", &Program::bit_readout, - "A map from bit to its (left-to-right) index in readouts " - "from backends (following the increasing lexicographic " - "order convention)") - .def_property_readonly( - "qubit_readout", &Program::qubit_readout, - "A map from qubit to its (left-to-right) index in readouts " - "from backends") - .def( - "get_commands", - [](const Program &prog) { - std::vector out; - for (Command c : prog) out.push_back(c); - return out; - }, - ":return: a list of all the Commands in the program") - .def( - "add_gate", - [](Program *prog, OpType type, const std::vector &args, - const py::kwargs &kwargs) { - return add_gate_method(prog, type, {}, args, kwargs); - }, - "Appends a single (non-parameterised) gate to the end of " - "the program on some particular qubits from the default " - "register ('q'). The number of qubits specified must match " - "the arity of the gate." - "\n\n:param type: The type of operation to add" - "\n:param args: The list of indices for the qubits/bits to " - "which the operation is applied" - "\n:param kwargs: Additional properties for classical " - "conditions" - "\n:return: the new :py:class:`Program`", - py::arg("type"), py::arg("args")) - .def( - "add_gate", - [](Program *prog, OpType type, const Expr &p, - const std::vector &args, const py::kwargs &kwargs) { - return add_gate_method(prog, type, {p}, args, kwargs); - }, - "Appends a single gate, parameterised by an expression, to " - "the end of the program on some particular qubits from the " - "default register ('q')." - "\n\n:param type: The type of gate to add" - "\n:param angle: The parameter for the gate in halfturns" - "\n:param args: The list of indices for the qubits/bits to " - "which the operation is applied" - "\n:param kwargs: Additional properties for classical " - "conditions" - "\n:return: the new :py:class:`Program`", - py::arg("type"), py::arg("angle"), py::arg("args")) - .def( - "add_gate", &add_gate_method, - "Appends a single gate, parameterised with a vector of " - "expressions corresponding to halfturns, to the end of the " - "program on some particular qubits from the default register " - "('q')." 
- "\n\n:param type: The type of gate to add" - "\n:param angles: The parameters for the gate in halfturns" - "\n:param args: The list of indices for the qubits/bits to " - "which the operation is applied" - "\n:param kwargs: Additional properties for classical " - "conditions" - "\n:return: the new :py:class:`Program`", - py::arg("type"), py::arg("angles"), py::arg("args")) - .def( - "add_gate", &add_gate_method, - "Appends a single gate to the end of the program" - "\n\n:param type: The type of gate to add" - "\n:param params: The parameters for the gate in halfturns" - "\n:param args: The qubits/bits to apply the gate to" - "\n:param kwargs: Additional properties for classical " - "conditions" - "\n:return: the new :py:class:`Program`", - py::arg("type"), py::arg("params"), py::arg("args")) - .def( - "append_circuit", - [](Program *prog, const Circuit &circ) { - prog->add_block(circ); - return prog; - }, - "Appends a circuit to the end of the program" - "\n\n:param circuit: The circuit to add" - "\n:return: the new :py:class:`Program`", - py::arg("circuit")) - .def( - "append", &Program::append, - "In-place sequential composition of programs, appending a " - "copy of the argument onto the end of `self`." - "\n\n:param prog: The program to be appended to the end of " - "`self`", - py::arg("prog")) - .def( - "append_if", &Program::append_if, - "In-place sequential composition of programs, performing " - "`body` after `self` if the `condition_bit` is found to be 1." - "\n\n:param condition_bit: A single bit condition." - "\n\n:param body: The program to be applied after `self` if " - "`condition_bit` is 1.", - py::arg("condition_bit"), py::arg("body")) - .def( - "append_if_else", &Program::append_if_else, - "In-place sequential composition of programs, performing " - "`if_body` after `self` if the `condition_bit` is found to be " - "1, and `else_body` if it is 0." - "\n\n:param condition_bit: A single bit condition." - "\n\n:param if_body: The program to be applied after `self` " - "if `condition_bit` is 1.", - "\n\n:param else_body: The program to be applied after `self` " - "if `condition_bit` is 0.", - py::arg("condition_bit"), py::arg("if_body"), py::arg("else_body")) - .def( - "append_while", &Program::append_while, - "In-place sequential composition of programs, performing " - "`body` after `self` repeatedly whilst the `condition_bit` is " - "found to be 1." - "\n\n:param condition_bit: A single bit condition." 
- "\n\n:param body: The program to be applied after `self` " - "repeatedly whilst `condition_bit` is 1.", - py::arg("condition_bit"), py::arg("body")); -} - -} // namespace tket diff --git a/pytket/binders/transform.cpp b/pytket/binders/transform.cpp index a2e891bc37..53434cce0a 100644 --- a/pytket/binders/transform.cpp +++ b/pytket/binders/transform.cpp @@ -21,7 +21,6 @@ #include #include "Circuit/Circuit.hpp" -#include "Routing/Routing.hpp" #include "Transformations/BasicOptimisation.hpp" #include "Transformations/Combinator.hpp" #include "Transformations/ContextualReduction.hpp" diff --git a/pytket/binders/zx/diagram.cpp b/pytket/binders/zx/diagram.cpp index 48503f0449..57fccad0c6 100644 --- a/pytket/binders/zx/diagram.cpp +++ b/pytket/binders/zx/diagram.cpp @@ -74,8 +74,8 @@ void ZXDiagramPybind::init_zxdiagram(py::module& m) { ":param out: Number of quantum outputs.\n" ":param classical_in: Number of classical inputs.\n" ":param classical_out: Number of classical outputs.", - py::arg("in"), py::arg("out"), py::arg("classical_in"), - py::arg("classical_out")) + py::arg("inputs"), py::arg("outputs"), py::arg("classical_inputs"), + py::arg("classical_outputs")) .def( py::init(), "Constructs a copy of an existing ZX diagram.\n\n" @@ -430,6 +430,31 @@ PYBIND11_MODULE(zx, m) { "value. Can either be Quantum or Classical - Quantum spiders can " "only have Quantum wires, Quantum wires on Classical spiders act as " "two wires. Can have arbitrary degree. No ports.") + .value( + "XY", ZXType::XY, + "A (postselected) XY qubit in MBQC. Corresponds to a Z spider with " + "negative phase.") + .value( + "XZ", ZXType::XZ, + "A (postselected) XZ qubit in MBQC. Corresponds to a 0.5-phase " + "(n+1)-ary Z spider connected to a phaseful 1-ary X spider.") + .value( + "YZ", ZXType::YZ, + "A (postselected) YZ qubit in MBQC. Corresponds to a 0-phase " + "(n+1)-ary Z spider connected to a phaseful 1-ary X spider.") + .value( + "PX", ZXType::PX, + "A (postselected) Pauli X qubit in MBQC. Corresponds to a Z spider " + "with phase either 0 (param=False) or 1 (param=True).") + .value( + "PY", ZXType::PY, + "A (postselected) Pauli Y qubit in MBQC. Corresponds to a Z spider " + "with phase either -0.5 (param=False) or +0.5 (param=True).") + .value( + "PZ", ZXType::PZ, + "A (postselected) Pauli Z qubit in MBQC. Corresponds to a 0-phase " + "(n+1)-ary Z spider connected to a 1-ary X spider with phase either " + "0 (param=False) or 1 (param=True).") .value( "Triangle", ZXType::Triangle, "A Triangle operator, [[1, 1], [0, 1]]. 
Can either be Quantum or " @@ -508,12 +533,18 @@ PYBIND11_MODULE(zx, m) { "The :py:class:`QuantumType` of the generator (if applicable).") .def("__eq__", &ZXGen::operator==) .def("__repr__", [](const ZXGen& gen) { return gen.get_name(); }); - py::class_, ZXGen>( - m, "BasicGen", + py::class_, ZXGen>( + m, "PhasedGen", + "Specialisation of :py:class:`ZXGen` for arbitrary-arity, symmetric " + "generators with a single continuous parameter.") + .def_property_readonly( + "param", &PhasedGen::get_param, "The parameter of the generator."); + py::class_, ZXGen>( + m, "CliffordGen", "Specialisation of :py:class:`ZXGen` for arbitrary-arity, symmetric " - "generators.") + "Clifford generators with a single boolean parameter.") .def_property_readonly( - "param", &BasicGen::get_param, "The parameter of the generator."); + "param", &CliffordGen::get_param, "The parameter of the generator."); py::class_, ZXGen>( m, "DirectedGen", "Specialisation of :py:class:`ZXGen` for asymmetric ZX generators which " diff --git a/pytket/conanfile.txt b/pytket/conanfile.txt index 93345c32b4..c72f77e3d4 100644 --- a/pytket/conanfile.txt +++ b/pytket/conanfile.txt @@ -1,7 +1,7 @@ [requires] tket/1.0.1 -pybind11/2.8.1 -nlohmann_json/3.10.4 +pybind11/2.9.1 +nlohmann_json/3.10.5 pybind11_json/0.2.11 [generators] diff --git a/pytket/docs/architecture.rst b/pytket/docs/architecture.rst new file mode 100644 index 0000000000..96b2a421d1 --- /dev/null +++ b/pytket/docs/architecture.rst @@ -0,0 +1,5 @@ +pytket.architecture +================================== +.. automodule:: pytket._tket.architecture + :members: + :special-members: __init__ diff --git a/pytket/docs/changelog.rst b/pytket/docs/changelog.rst index dbada60f38..a5b1d3ce54 100644 --- a/pytket/docs/changelog.rst +++ b/pytket/docs/changelog.rst @@ -1,7 +1,72 @@ Changelog ========= +1.0.0 (March 2022) +------------------ + +API changes: + +* ``Rebase`` and ``SquashHQS`` methods are removed. Specifically: + + * ``RebaseHQS`` + * ``RebaseProjectQ`` + * ``RebasePyZX`` + * ``RebaseQuil`` + * ``RebaseUMD`` + * ``RebaseUFR`` + * ``RebaseOQC`` + +* The deprecated ``QubitPauliString.to_dict`` method is removed. (Use the + ``map`` property instead.) +* The deprecated ``Backend.compile_circuit`` method is removed. (Use + ``get_compiled_circuit`` instead.) +* The ``routing`` module is removed. +* ``Placement``, ``LinePlacement``, ``GraphPlacement`` and ``NoiseAwarePlacement`` + are now imported from the ``placement`` module. +* ``Architecture``, ``SquareGrid``, ``RingArch`` and ``FullyConnected`` are now + imported from the ``architecture`` module. +* Methods for mapping logical to physical circuits are now available in the + ``mapping`` module, with a new API and new functionality. +* The keyword parameter and property ``def`` is now called ``definition`` in + ``Circuit.add_custom_gate`` and ``CustomGateDef``. +* ``RebaseCustom`` takes one allowed gateset parameter rather than separate single qubit and multiqubit gatesets. +* The ``Backend.characterisation`` property is removed. (Use + ``Backend.backend_info`` instead.) +* The ``QubitPauliOperator.from_OpenFermion`` and + ``QubitPauliOperator.to_OpenFermion`` methods are removed. +* The ``pytket.program`` module is removed. +* The ``pytket.telemetry`` module is removed. + +Major new features: + +* New methods for mapping logical to physical circuits for some ``Architecture``. + The new method will use a list of user-given methods, each of them suitable only + for a specific set of subcircuits. 
Users can add their own methods if they want to. + All compiler passes in pytket are updated to use the new methods. + The methods already given by pytket are ``LexiRouteRoutingMethod``, + ``LexiLabellingMethod``, ``MultiGateReorderRoutingMethod``, + ``AASRouteRoutingMethod``, ``BoxDecompositionRoutingMethod``, and ``AASLabellingMethod``. +Minor new features: + +* Add ``delay_measures`` option to ``DefaultMappingPass``. +* New ``pytket.passes.auto_rebase_pass`` and ``pytket.passes.auto_squash_pass`` + which attempt to construct rebase and squash passess given a target gate set from known + decompositions. +* Add ``get_c_register``, ``get_q_register``, ``c_registers`` and ``q_registers`` methods to ``Circuit``. +* New ``pytket.passes.NaivePlacementPass`` which completes a basic relabelling of all Circuit Qubit + not labelled as some Architecture Node to any available Architecture Node +* Add ``opgroups`` property to ``Circuit``. +* ``Architecture`` has new ``valid_operation`` method which returns true if passed UnitIDs that respect + architecture constraints. +* ``CircuitStatus`` has several new optional properties such as time-stamps associated with status changes, + queue position or detailed error information. + +Fixes: + +* ``ConnectivityPredicate.implies()`` checks for existence of isolated nodes as + well as edges in second architecture. + 0.19.2 (February 2022) ---------------------- @@ -9,7 +74,6 @@ Fixes: * Fix issue with jinja2 by updating dependency. - 0.19.1 (February 2022) ---------------------- diff --git a/pytket/docs/conf.py b/pytket/docs/conf.py index 44b70910b9..668ef973ba 100644 --- a/pytket/docs/conf.py +++ b/pytket/docs/conf.py @@ -38,9 +38,9 @@ author = "Cambridge Quantum Computing Ltd" # The short X.Y version -version = "0.19" +version = "1.0" # The full version, including alpha/beta/rc tags -release = "0.19.2" +release = "1.0.0" # -- General configuration --------------------------------------------------- @@ -215,7 +215,8 @@ "pytket.backends.backend.Backend": "pytket.backends.Backend", "tket::Predicate": "pytket._tket.predicates.Predicate", "tket::Qubit": "pytket._tket.circuit.Qubit", - "tket::Architecture": "pytket._tket.routing.Architecture", + "tket::Architecture": "pytket._tket.architecture.Architecture", + "tket::RoutingMethod": "pytket._tket.mapping.RoutingMethod", "tket::CircBox": "pytket._tket.circuit.CircBox", "tket::ExpBox": "pytket._tket.circuit.ExpBox", "tket::QControlBox": "pytket._tket.circuit.QControlBox", diff --git a/pytket/docs/index.rst b/pytket/docs/index.rst index f520675492..0a47674a00 100644 --- a/pytket/docs/index.rst +++ b/pytket/docs/index.rst @@ -125,15 +125,9 @@ LICENCE Licensed under the `Apache 2 License `_. -Telemetry Data Policy -~~~~~~~~~~~~~~~~~~~~~ - -Our telemetry data policy can be viewed in the `Telemetry Data Policy`_ page. - .. _Getting Started: getting_started.html .. _examples: https://github.com/CQCL/pytket/tree/main/examples .. _CQC: https://cambridgequantum.com -.. _Telemetry Data Policy: telemetry_data_policy.html .. toctree:: :caption: Introduction: @@ -142,7 +136,6 @@ Our telemetry data policy can be viewed in the `Telemetry Data Policy`_ page. getting_started.rst changelog.rst install.rst - Telemetry Data Policy opensource.rst .. toctree:: @@ -161,11 +154,12 @@ Our telemetry data policy can be viewed in the `Telemetry Data Policy`_ page. 
pauli.rst passes.rst predicates.rst - program.rst partition.rst qasm.rst quipper.rst - routing.rst + architecture.rst + placement.rst + mapping.rst tableau.rst transform.rst tailoring.rst diff --git a/pytket/docs/routing.rst b/pytket/docs/mapping.rst similarity index 60% rename from pytket/docs/routing.rst rename to pytket/docs/mapping.rst index ef9cfa9905..22b4ed6dfb 100644 --- a/pytket/docs/routing.rst +++ b/pytket/docs/mapping.rst @@ -1,5 +1,5 @@ -pytket.routing +pytket.mapping ================================== -.. automodule:: pytket._tket.routing +.. automodule:: pytket._tket.mapping :members: :special-members: __init__ diff --git a/pytket/docs/program.rst b/pytket/docs/placement.rst similarity index 58% rename from pytket/docs/program.rst rename to pytket/docs/placement.rst index 39b795a106..7fd347894d 100644 --- a/pytket/docs/program.rst +++ b/pytket/docs/placement.rst @@ -1,5 +1,5 @@ -pytket.program +pytket.placement ================================== -.. automodule:: pytket._tket.program +.. automodule:: pytket._tket.placement :members: :special-members: __init__ diff --git a/pytket/docs/telemetry_data_policy.rst b/pytket/docs/telemetry_data_policy.rst deleted file mode 100644 index 0ab8606ba7..0000000000 --- a/pytket/docs/telemetry_data_policy.rst +++ /dev/null @@ -1,165 +0,0 @@ -PYTKET TELEMETRY DATA POLICY -============================ - -1. Introduction ---------------- - -Cambridge Quantum Computing ("CQC", "we", "us" or "our") is a world -leading independent quantum computing software company. CQC respects -your privacy and is committed to safeguarding the privacy of information -provided to us. - -Software sometimes includes functionality to collect data about how the -software is used or performing ("Telemetry Data"). Telemetry Data is -often collected through a "phone home" mechanism built into the software -itself. Each end user deploying the software is typically presented with -the choice to opt-in to share statistical data with the developers of -the software. - -This Telemetry Data Policy ("Telemetry Data Policy") explains how we may -collect, store and use Telemetry Data that is obtained from the -telemetry feature for our proprietary "t\|ket>" software, also referred -to as 'pytket' in this Telemetry Data Policy. - -This telemetry feature calls back to CQC servers with information which -helps us understand how our users use our software libraries. It may -also collect diagnostic telemetry data which is integral to the -improvement of our software. If you have any questions about this, -please contact privacycqc@cambridgequantum.com. - -This Telemetry Data Policy provides you with certain information that -must be provided under the General Data Protection Regulation (*(EU) -2016/679*) (GDPR) and the local data protection law. 
- -Data collected -~~~~~~~~~~~~~~ - -The telemetry feature will collect the following Telemetry Data: - -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| Telemetry Data - Description | Value Example | -+=====================================================================================================================================================================================+============================+ -| Telemetry ID – A random ID | 130567 | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| Pytket related software versions – version of pytket installed and pytket plugins | pytket 0.6.1 | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| Machine | i386 | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| Processor – Processor Make | Intel, AMD | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| Python implementation | CPython, Pypy … | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| Python version | 3.6, 3.7 … | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| System | Linux, Windows, Java … | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| System version | Windows 10, Windows XP … | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| System release | NT6.1 | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| C API version | | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ -| Crash reports – technical information, such as error and usage statistics, which help our developers understand what is happening inside the product code if the software crashes | | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+ - -Users will 
be provided with a randomly generated telemetry ID which will -be used to identify the system that is then stored in the user's -configuration file. The telemetry feature will not collect any -Personally Identifiable Information (PII). For the avoidance of doubt, -CQC maintains that it will still treat all the Telemetry Data collected -(listed above) as though it is personal data. - -What the Data will be Used for -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In accordance with GDPR, the Telemetry Data will be used for: - -- determining how many people are using and downloading pytket - -- determining what kind of systems pytket is typically run on so we - know where to target support and features - -- support for reporting faults and errors automatically in a way that - can be collected and aggregated - i.e., after a release we can see if - a lot of errors start to occur. - -3. How we store the Data ------------------------- - -The Telemetry Data is stored in a managed PostgreSQL server hosted in -Azure Web Services, where it is stored with a timestamp as an event. - -4. Your Rights --------------- - -In accordance with the provisions of GDPR, we inform you that you can -exercise your rights of access, rectification, erasure, right to be -forgotten, opposition, data portability and limitation of processing -directly with CQC. - -To facilitate the exercise of these rights, you may send a message with -your request to this effect, indicating name, surname and e-mail address -to the following e-mail address: privacycqc@cambridgequantum.com - -5. Security ------------ - -We take reasonable steps to hold information securely in electronic or -physical form and to prevent unauthorised access, use, modification or -disclosure. We require our third-party data storage providers to comply -with appropriate information security industry standards. All partners -and staff and third-party providers with access to confidential -information are subject to confidentiality obligations.  - -The transmission of information via the internet is not completely -secure. We cannot guarantee the security of your data transmitted to our -online services; hence, any transmission is at your own risk. - -6. Acceptance of this Telemetry Data Policy -------------------------------------------- - -Any user who installs and uses CQC's proprietary 't\|ket>' software (the -"End User") ACKNOWLEDGES AND AGREES TO HAVE READ AND UNDERSTOOD this -Telemetry Data Policy, the content of which constitutes the entire -agreement between the End User and CQC regarding the use and processing -of the End User's Telemetry Data. The End User expressly agrees to be -bound by the terms of the present Telemetry Data Policy, in its entirety -and scope, without exception to any of its provisions. - -CQC reserves the right to update and make changes to the Telemetry Data -Policy even without prior notice to the End User. It is the End User's -responsibility to review the amended Telemetry Data Policy. - -7. Contact and Further Information ----------------------------------- - -Should you have any questions about this Telemetry Data Policy, or want -to submit a written complaint to us about how we handle your Telemetry -Data, please contact us at: - -  - -Attention: Data Protection Officer - -Cambridge Quantum Computing Limited - -32 St. 
James's Street - -London - -SW1A 1HD - -The United Kingdom - -Telephone: +44 (0) 203 301 9331 - -Email: \ privacycqc@cambridgequantum.com - -  - -If you make a privacy complaint, we will respond to let you know how -your complaint will be handled. We may ask you for further details, -consult with other parties and keep records regarding your complaint. - -**Last updated:** The Pytket Telemetry Data Policy was last updated on -30 November 2020. diff --git a/pytket/pyproject.toml b/pytket/pyproject.toml index dd0574d68c..ccc3aa56df 100644 --- a/pytket/pyproject.toml +++ b/pytket/pyproject.toml @@ -1,8 +1,3 @@ [build-system] -requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2", "conan"] +requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.4", "conan"] build-backend = "setuptools.build_meta" - -[tool.setuptools_scm] -root = ".." -write_to = "pytket/pytket/_version.py" -write_to_template = '__version__ = "{version}"' diff --git a/pytket/pytket/__init__.py b/pytket/pytket/__init__.py index 7134d3a1dc..b7fccc6a0d 100755 --- a/pytket/pytket/__init__.py +++ b/pytket/pytket/__init__.py @@ -20,10 +20,17 @@ Qubit, Bit, ) -import pytket.routing +import pytket.mapping +import pytket.architecture +import pytket.placement import pytket.transform -import pytket.telemetry - +from pytket.config import PytketConfig, get_config_file_path from pytket._version import __version__ +# Create pytket config file if it does not exist: +pytket_config_file = get_config_file_path() +if not pytket_config_file.exists(): + config = PytketConfig.default() + config.write_file(pytket_config_file) + __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/pytket/pytket/program/__init__.py b/pytket/pytket/architecture/__init__.py similarity index 77% rename from pytket/pytket/program/__init__.py rename to pytket/pytket/architecture/__init__.py index 204b68c929..bf4309e634 100644 --- a/pytket/pytket/program/__init__.py +++ b/pytket/pytket/architecture/__init__.py @@ -12,9 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -This module provides an API to interact with the intermediate representation for tket's -:py:class:`Program` objects. -""" +"""The `architecture` module provides an API to interact with the + ::py:class:`Architecture` class.""" -from pytket._tket.program import * # type: ignore +from pytket._tket.architecture import * # type: ignore diff --git a/pytket/pytket/backends/backend.py b/pytket/pytket/backends/backend.py index 0194fa69e7..cdff8095a3 100644 --- a/pytket/pytket/backends/backend.py +++ b/pytket/pytket/backends/backend.py @@ -157,40 +157,6 @@ def default_compilation_pass(self, optimisation_level: int = 1) -> BasePass: """ ... - def compile_circuit(self, circuit: Circuit, optimisation_level: int = 1) -> None: - """Apply the default_compilation_pass to a circuit in place. - - As well as applying a degree of optimisation (controlled by the - `optimisation_level` parameter), this method tries to ensure that the circuit - can be run on the backend (i.e. successfully passed to - :py:meth:`process_circuits`), for example by rebasing to the supported gate set, - or routing to match the connectivity of the device. However, this is not always - possible, for example if the circuit contains classical operations that are not - supported by the backend. You may use :py:meth:`valid_circuit` to check whether - the circuit meets the backend's requirements after compilation. 
This validity - check is included in :py:meth:`process_circuits` by default, before any circuits - are submitted to the backend. - - If the validity check fails, you can obtain more information about the failure - by iterating through the predicates in the `required_predicates` property of the - backend, and running the :py:meth:`verify` method on each in turn with your - circuit. - - :param circuit: The circuit to compile. - :type circuit: Circuit - :param optimisation_level: The level of optimisation to perform during - compilation. Level 0 just solves the device constraints without - optimising. Level 1 additionally performs some light optimisations. - Level 2 adds more intensive optimisations that can increase compilation - time for large circuits. Defaults to 1. - :type optimisation_level: int, optional - """ - warnings.warn( - "compile_circuit is deprecated and will be removed in a future pytket.", - DeprecationWarning, - ) - self.default_compilation_pass(optimisation_level).apply(circuit) - def get_compiled_circuit( self, circuit: Circuit, optimisation_level: int = 1 ) -> Circuit: @@ -440,19 +406,6 @@ def cancel(self, handle: ResultHandle) -> None: """ raise NotImplementedError("Backend does not support job cancellation.") - @property - def characterisation(self) -> Optional[dict]: - """Retrieve the characterisation targeted by the backend if it exists. - - :return: The characterisation that this backend targets if it exists. The - characterisation object contains device-specific information such as gate - error rates. - :rtype: Optional[dict] - """ - raise NotImplementedError( - "Backend does not support retrieving characterisation." - ) - @property def backend_info(self) -> Optional[BackendInfo]: """Retrieve all Backend properties in a BackendInfo object, including diff --git a/pytket/pytket/backends/backendinfo.py b/pytket/pytket/backends/backendinfo.py index a3469f405f..e3b3d8ce5b 100644 --- a/pytket/pytket/backends/backendinfo.py +++ b/pytket/pytket/backends/backendinfo.py @@ -13,12 +13,126 @@ # limitations under the License. 
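Following the guidance in the removed docstring above, migrating off the deleted compile_circuit is a small change; a minimal sketch, where the helper name compile_for_backend is hypothetical:

    from pytket.backends import Backend
    from pytket.circuit import Circuit

    def compile_for_backend(backend: Backend, circ: Circuit) -> Circuit:
        # compile_circuit (removed) mutated its argument in place; get_compiled_circuit
        # returns a compiled copy and leaves the input circuit unchanged.
        compiled = backend.get_compiled_circuit(circ, optimisation_level=1)
        if not backend.valid_circuit(compiled):
            # As the removed docstring noted, compilation cannot always satisfy every
            # backend requirement (e.g. unsupported classical operations).
            raise ValueError("circuit does not satisfy the backend's predicates")
        return compiled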
""" BackendInfo class: additional information on Backends """ + from dataclasses import dataclass, field, asdict from typing import Any, Dict, List, Optional, Set, cast, Tuple, Union -from pytket.routing import Architecture, FullyConnected # type: ignore +from pytket.architecture import Architecture, FullyConnected # type: ignore from pytket.circuit import Node, OpType # type: ignore +_OpTypeErrs = Dict[OpType, float] +_Edge = Tuple[Node, Node] + + +def _serialize_all_node_gate_errors( + d: Optional[Dict[Node, _OpTypeErrs]] +) -> Optional[List[List]]: + if d is None: + return None + return [ + [n.to_list(), {ot.name: err for ot, err in errs.items()}] + for n, errs in d.items() + ] + + +def _deserialize_all_node_gate_errors( + l: Optional[List[List]], +) -> Optional[Dict[Node, _OpTypeErrs]]: + if l is None: + return None + return { + Node.from_list(n): {OpType.from_name(ot): err for ot, err in errs.items()} + for n, errs in l + } + + +def _serialize_all_edge_gate_errors( + d: Optional[Dict[_Edge, _OpTypeErrs]] +) -> Optional[List]: + if d is None: + return None + return [ + [[n0.to_list(), n1.to_list()], {ot.name: err for ot, err in errs.items()}] + for (n0, n1), errs in d.items() + ] + + +def _deserialize_all_edge_gate_errors( + l: Optional[List], +) -> Optional[Dict[_Edge, _OpTypeErrs]]: + if l is None: + return None + return { + (Node.from_list(n0), Node.from_list(n1)): { + OpType.from_name(ot): err for ot, err in errs.items() + } + for (n0, n1), errs in l + } + + +def _serialize_all_readout_errors( + d: Optional[Dict[Node, List[List[float]]]] +) -> Optional[List[List]]: + if d is None: + return None + return [[n.to_list(), errs] for n, errs in d.items()] + + +def _deserialize_all_readout_errors( + l: Optional[List[List]], +) -> Optional[Dict[Node, List[List[float]]]]: + if l is None: + return None + return {Node.from_list(n): errs for n, errs in l} + + +def _serialize_averaged_node_gate_errors( + d: Optional[Dict[Node, float]] +) -> Optional[List[List]]: + if d is None: + return None + return [[n.to_list(), err] for n, err in d.items()] + + +def _deserialize_averaged_node_gate_errors( + l: Optional[List[List]], +) -> Optional[Dict[Node, float]]: + if l is None: + return None + return {Node.from_list(n): err for n, err in l} + + +def _serialize_averaged_edge_gate_errors( + d: Optional[Dict[_Edge, float]] +) -> Optional[List[List]]: + if d is None: + return None + return [[[n0.to_list(), n1.to_list()], err] for (n0, n1), err in d.items()] + + +def _deserialize_averaged_edge_gate_errors( + l: Optional[List[List]], +) -> Optional[Dict[Tuple, float]]: + if l is None: + return None + return {(Node.from_list(n0), Node.from_list(n1)): err for (n0, n1), err in l} + + +def _serialize_averaged_readout_errors( + d: Optional[Dict[Node, float]] +) -> Optional[List[List]]: + if d is None: + return None + return [[n.to_list(), err] for n, err in d.items()] + + +def _deserialize_averaged_readout_errors( + l: Optional[List[List]], +) -> Optional[Dict[Node, float]]: + if l is None: + return None + return {Node.from_list(n): err for n, err in l} + @dataclass class BackendInfo: @@ -41,14 +155,15 @@ class BackendInfo: :param all_edge_gate_errors: Dictionary between architecture couplings and error rate for different two-qubit operations. :param all_readout_errors: Dictionary between architecture Node and uncorrelated - single qubit readout errors. + single qubit readout errors (2x2 readout probability matrix). 
:param averaged_node_gate_errors: Dictionary between architecture Node and averaged error rate for all single qubit operations. :param averaged_edge_gate_errors: Dictionary between architecture couplings and averaged error rate for all two-qubit operations. :param averaged_readout_errors: Dictionary between architecture Node and averaged readout errors. - :param misc: key-value map with further provider-specific information + :param misc: key-value map with further provider-specific information (must be + JSON-serializable) """ # identifying information @@ -131,6 +246,24 @@ def to_dict(self) -> Dict[str, Any]: self_dict = asdict(self) self_dict["architecture"] = self_dict["architecture"].to_dict() self_dict["gate_set"] = [op.value for op in self_dict["gate_set"]] + self_dict["all_node_gate_errors"] = _serialize_all_node_gate_errors( + self_dict["all_node_gate_errors"] + ) + self_dict["all_edge_gate_errors"] = _serialize_all_edge_gate_errors( + self_dict["all_edge_gate_errors"] + ) + self_dict["all_readout_errors"] = _serialize_all_readout_errors( + self_dict["all_readout_errors"] + ) + self_dict["averaged_node_gate_errors"] = _serialize_averaged_node_gate_errors( + self_dict["averaged_node_gate_errors"] + ) + self_dict["averaged_edge_gate_errors"] = _serialize_averaged_edge_gate_errors( + self_dict["averaged_edge_gate_errors"] + ) + self_dict["averaged_readout_errors"] = _serialize_averaged_readout_errors( + self_dict["averaged_readout_errors"] + ) return self_dict @classmethod @@ -149,6 +282,24 @@ def from_dict(cls, d: Dict[str, Any]) -> "BackendInfo": else: args["architecture"] = FullyConnected.from_dict(args["architecture"]) args["gate_set"] = {OpType(op) for op in args["gate_set"]} + args["all_node_gate_errors"] = _deserialize_all_node_gate_errors( + args["all_node_gate_errors"] + ) + args["all_edge_gate_errors"] = _deserialize_all_edge_gate_errors( + args["all_edge_gate_errors"] + ) + args["all_readout_errors"] = _deserialize_all_readout_errors( + args["all_readout_errors"] + ) + args["averaged_node_gate_errors"] = _deserialize_averaged_node_gate_errors( + args["averaged_node_gate_errors"] + ) + args["averaged_edge_gate_errors"] = _deserialize_averaged_edge_gate_errors( + args["averaged_edge_gate_errors"] + ) + args["averaged_readout_errors"] = _deserialize_averaged_readout_errors( + args["averaged_readout_errors"] + ) return cls(**args) diff --git a/pytket/pytket/backends/status.py b/pytket/pytket/backends/status.py index 3b28e10353..49d70badd7 100644 --- a/pytket/pytket/backends/status.py +++ b/pytket/pytket/backends/status.py @@ -14,7 +14,8 @@ """Status classes for circuits submitted to backends. """ -from typing import Any, Dict, NamedTuple +from datetime import datetime +from typing import Any, Callable, Dict, NamedTuple, Optional from enum import Enum @@ -30,15 +31,54 @@ class StatusEnum(Enum): class CircuitStatus(NamedTuple): - """The status of a circuit along with optional long description, \ -for example an error message.""" + """The status of a circuit along with an optional description. + + Optionally can also include extra fields such as: + * Detailed error information. + * Timestamps for changes in status. + * Queue position. + """ status: StatusEnum message: str = "" + error_detail: Optional[str] = None + + # Timestamp for when a status was last entered. 
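The extended CircuitStatus (continued below) round-trips through its dictionary form, with datetimes serialised via isoformat in to_dict and parsed back in from_dict; a sketch with illustrative values.

    from datetime import datetime
    from pytket.backends.status import CircuitStatus, StatusEnum

    status = CircuitStatus(
        StatusEnum.QUEUED,
        "waiting in queue",
        queued_time=datetime(2022, 3, 1, 12, 0, 0),
        queue_position=5,
    )
    d = status.to_dict()                        # datetimes become ISO-format strings
    assert CircuitStatus.from_dict(d) == status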
+ completed_time: Optional[datetime] = None + queued_time: Optional[datetime] = None + submitted_time: Optional[datetime] = None + running_time: Optional[datetime] = None + cancelled_time: Optional[datetime] = None + error_time: Optional[datetime] = None + + queue_position: Optional[int] = None def to_dict(self) -> Dict[str, Any]: """Return JSON serializable dictionary representation.""" - return {"status": self.status.name, "message": self.message} + circuit_status_dict: Dict[str, Any] = { + "status": self.status.name, + "message": self.message, + } + if self.error_detail is not None: + circuit_status_dict["error_detail"] = self.error_detail + + if self.completed_time is not None: + circuit_status_dict["completed_time"] = self.completed_time.isoformat() + if self.queued_time is not None: + circuit_status_dict["queued_time"] = self.queued_time.isoformat() + if self.submitted_time is not None: + circuit_status_dict["submitted_time"] = self.submitted_time.isoformat() + if self.running_time is not None: + circuit_status_dict["running_time"] = self.running_time.isoformat() + if self.cancelled_time is not None: + circuit_status_dict["cancelled_time"] = self.cancelled_time.isoformat() + if self.error_time is not None: + circuit_status_dict["error_time"] = self.error_time.isoformat() + + if self.queue_position is not None: + circuit_status_dict["queue_position"] = self.queue_position + + return circuit_status_dict @classmethod def from_dict(cls, dic: Dict[str, Any]) -> "CircuitStatus": @@ -46,11 +86,38 @@ def from_dict(cls, dic: Dict[str, Any]) -> "CircuitStatus": invalid = ValueError(f"Dictionary invalid format for CircuitStatus: {dic}") if "message" not in dic or "status" not in dic: raise invalid + try: status = next(s for s in StatusEnum if dic["status"] == s.name) except StopIteration as e: raise invalid from e - return cls(status, dic["message"]) + + error_detail = dic.get("error_detail", None) + + read_optional_datetime: Callable[[str], Optional[datetime]] = lambda key: ( + datetime.fromisoformat(x) if (x := dic.get(key)) is not None else None + ) + completed_time = read_optional_datetime("completed_time") + queued_time = read_optional_datetime("queued_time") + submitted_time = read_optional_datetime("submitted_time") + running_time = read_optional_datetime("running_time") + cancelled_time = read_optional_datetime("cancelled_time") + error_time = read_optional_datetime("error_time") + + queue_position = dic.get("queue_position", None) + + return cls( + status, + dic["message"], + error_detail, + completed_time, + queued_time, + submitted_time, + running_time, + cancelled_time, + error_time, + queue_position, + ) WAITING_STATUS = {StatusEnum.QUEUED, StatusEnum.SUBMITTED, StatusEnum.RUNNING} diff --git a/pytket/pytket/config/pytket_config.py b/pytket/pytket/config/pytket_config.py index 64b3584ecd..15ef23cf2d 100644 --- a/pytket/pytket/config/pytket_config.py +++ b/pytket/pytket/config/pytket_config.py @@ -15,7 +15,6 @@ from abc import ABC, abstractmethod from pathlib import Path from typing import Any, ClassVar, Dict, Optional, Type, TypeVar -from uuid import UUID from dataclasses import asdict import json import os @@ -39,35 +38,25 @@ class PytketConfig: """PytketConfig represents a loaded config file for pytket and extension packages.""" - enable_telemetry: bool - telemetry_id: Optional[UUID] extensions: Dict[str, Any] def __init__( self, - enable_telemetry: bool, - telemetry_id: Optional[UUID], extensions: Optional[Dict[str, Any]] = None, ) -> None: """Construct a PytketConfig object 
with inital config parameter values. - :param enable_telemetry: Set pytket telemetery on. - :type enable_telemetry: bool - :param telemetry_id: UUID identifying this system for telemetery - :type telemetry_id: Optional[UUID] :param extensions: Dictionary holding parameter values for extension packages, defaults to None :type extensions: Optional[Dict[str, Any]], optional """ - self.enable_telemetry = enable_telemetry - self.telemetry_id = telemetry_id self.extensions = {} if extensions is None else extensions @classmethod def default(cls) -> "PytketConfig": """Construct a default PytketConfig""" - return PytketConfig(enable_telemetry=False, telemetry_id=None) + return PytketConfig() @classmethod def read_file(cls, config_file_path: Path) -> "PytketConfig": @@ -75,8 +64,6 @@ def read_file(cls, config_file_path: Path) -> "PytketConfig": with config_file_path.open("r", encoding="utf-8") as config_file: config = json.load(config_file) return PytketConfig( - config.get("enable_telemetry", False), - config.get("telemetry_id", None), config.get("extensions", dict()), ) @@ -85,8 +72,6 @@ def write_file(self, config_file_path: Path) -> None: config_file_path.parent.mkdir(parents=True, exist_ok=True) with config_file_path.open("w", encoding="utf-8") as config_file: config = { - "enable_telemetry": self.enable_telemetry, - "telemetry_id": self.telemetry_id, "extensions": self.extensions, } json.dump(config, config_file, indent=2) diff --git a/pytket/pytket/routing/__init__.py b/pytket/pytket/mapping/__init__.py similarity index 58% rename from pytket/pytket/routing/__init__.py rename to pytket/pytket/mapping/__init__.py index e1fc81a725..a413b474b3 100644 --- a/pytket/pytket/routing/__init__.py +++ b/pytket/pytket/mapping/__init__.py @@ -12,12 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -The routing module provides access to the tket :py:class:`Architecture` structure and -methods for modifying circuits to satisfy the architectural constraints. It also -provides acess to the :py:class:`Placement` constructors for relabelling Circuit qubits -and has some methods for routing circuits. This module is provided in binary form during -the PyPI installation. -""" +"""The `mapping` module provides an API to interact with the + :py:class:`MappingManager` class, with methods for + mapping logical circuits to physical circuits and for + defining custom routing solutions.""" -from pytket._tket.routing import * # type: ignore +from pytket._tket.mapping import * # type: ignore diff --git a/pytket/pytket/passes/__init__.py b/pytket/pytket/passes/__init__.py index 07004a66a4..b617a99486 100644 --- a/pytket/pytket/passes/__init__.py +++ b/pytket/pytket/passes/__init__.py @@ -17,3 +17,4 @@ from pytket._tket.passes import * # type: ignore from .script import compilation_pass_from_script, compilation_pass_grammar +from .auto_rebase import auto_rebase_pass, auto_squash_pass diff --git a/pytket/pytket/passes/_decompositions.py b/pytket/pytket/passes/_decompositions.py new file mode 100644 index 0000000000..91c0648f5f --- /dev/null +++ b/pytket/pytket/passes/_decompositions.py @@ -0,0 +1,114 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union +from sympy import Expr # type: ignore +from pytket.circuit import Circuit, OpType # type: ignore + +Param = Union[float, "Expr"] + + +def approx_0_mod_2(x: Param, eps: float = 1e-10) -> bool: + """Check if parameter is approximately 0 mod 2 up to eps precision. + + :param param: Parameter, float or sympy expression. + :type param: Param + :param eps: Tolerance, defaults to 1e-10 + :type eps: float, optional + :return: Approximately 0 boolean. + :rtype: bool + """ + if isinstance(x, Expr) and not x.is_constant(): # type: ignore + return False + x = float(x) + x %= 2 + return min(x, 2 - x) < eps + + +def int_half(angle: float) -> int: + """Assume angle is approximately an even integer, and return the half + + :param angle: Float angle + :type angle: float + :return: Integer half of angle + :rtype: int + """ + # + two_x = round(angle) + assert not two_x % 2 + return two_x // 2 + + +def _TK1_to_RxRy(a: Param, b: Param, c: Param) -> Circuit: + return Circuit(1).Rx(-0.5, 0).Ry(c, 0).Rx(b, 0).Ry(a, 0).Rx(0.5, 0) + + +def _TK1_to_X_SX_Rz(a: Param, b: Param, c: Param) -> Circuit: + circ = Circuit(1) + correction_phase = 0.0 + + # all phase identities use, for integer k, + # Rx(2k) = Rz(2k) = (-1)^{k}I + + # _approx_0_mod_2 checks if parameters are constant + # so they can be assumed to be constant + if approx_0_mod_2(b): + circ.Rz(a + c, 0) + # b = 2k, if k is odd, then Rx(b) = -I + correction_phase += int_half(float(b)) + + elif approx_0_mod_2(b + 1): + # Use Rx(2k-1) = i(-1)^{k}X + correction_phase += -0.5 + int_half(float(b) - 1) + if approx_0_mod_2(a - c): + circ.X(0) + # a - c = 2m + # overall operation is (-1)^{m}Rx(2k -1) + correction_phase += int_half(float(a - c)) + + else: + circ.Rz(c, 0).X(0).Rz(a, 0) + + elif approx_0_mod_2(b - 0.5) and approx_0_mod_2(a) and approx_0_mod_2(c): + # a = 2k, b = 2m+0.5, c = 2n + # Rz(2k)Rx(2m + 0.5)Rz(2n) = (-1)^{k+m+n}e^{-i \pi /4} SX + circ.SX(0) + correction_phase += ( + int_half(float(b) - 0.5) + int_half(float(a)) + int_half(float(c)) - 0.25 + ) + + elif approx_0_mod_2(b + 0.5) and approx_0_mod_2(a) and approx_0_mod_2(c): + # a = 2k, b = 2m-0.5, c = 2n + # Rz(2k)Rx(2m - 0.5)Rz(2n) = (-1)^{k+m+n}e^{i \pi /4} X.SX + circ.X(0).SX(0) + correction_phase += ( + int_half(float(b) + 0.5) + int_half(float(a)) + int_half(float(c)) + 0.25 + ) + elif approx_0_mod_2(a - 0.5) and approx_0_mod_2(c - 0.5): + # Rz(2k + 0.5)Rx(b)Rz(2m + 0.5) = -i(-1)^{k+m}SX.Rz(1-b).SX + circ.SX(0).Rz(1 - b, 0).SX(0) + correction_phase += int_half(float(a) - 0.5) + int_half(float(c) - 0.5) - 0.5 + else: + circ.Rz(c + 0.5, 0).SX(0).Rz(b - 1, 0).SX(0).Rz(a + 0.5, 0) + correction_phase += -0.5 + + circ.add_phase(correction_phase) + return circ + + +def _TK1_to_U(a: Param, b: Param, c: Param) -> Circuit: + circ = Circuit(1) + circ.add_gate(OpType.U3, [b, a - 0.5, c + 0.5], [0]) + circ.add_phase(-0.5 * (a + c)) + return circ diff --git a/pytket/pytket/passes/auto_rebase.py b/pytket/pytket/passes/auto_rebase.py new file mode 100644 index 0000000000..0dca9c5734 --- /dev/null +++ b/pytket/pytket/passes/auto_rebase.py @@ -0,0 +1,114 @@ +# Copyright 2019-2022 
Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Set, Callable, Dict, FrozenSet +from pytket.circuit import Circuit, OpType # type: ignore +from pytket._tket.circuit import _library # type: ignore +from pytket.passes import RebaseCustom, SquashCustom # type: ignore + +from ._decompositions import Param, _TK1_to_X_SX_Rz, _TK1_to_RxRy, _TK1_to_U + + +class NoAutoRebase(Exception): + """Automatic rebase could not be found.""" + + +_CX_CIRCS: Dict[OpType, Callable[[], "Circuit"]] = { + OpType.CX: _library._CX, + OpType.ZZMax: _library._CX_using_ZZMax, + OpType.XXPhase: _library._CX_using_XXPhase_0, + OpType.ECR: _library._CX_using_ECR, + OpType.CZ: _library._H_CZ_H, +} + + +def get_cx_decomposition(gateset: Set[OpType]) -> Circuit: + """Return a Circuit expressing a CX in terms of a two qubit gate in the + gateset if one is available, raise an error otherwise. + + :param gateset: Target gate set. + :type gateset: Set[OpType] + :raises NoAutoRebase: No suitable CX decomposition found. + :return: Decomposition circuit. + :rtype: Circuit + """ + if any((matching := k) in gateset for k in _CX_CIRCS): + return _CX_CIRCS[matching]() + raise NoAutoRebase("No known decomposition from CX to available gateset.") + + +_TK1_circs: Dict[FrozenSet[OpType], Callable[[Param, Param, Param], "Circuit"]] = { + frozenset({OpType.TK1}): _library._TK1_to_TK1, + frozenset({OpType.PhasedX, OpType.Rz}): _library._TK1_to_PhasedXRz, + frozenset({OpType.Rx, OpType.Rz}): _library._TK1_to_RzRx, + frozenset({OpType.Ry, OpType.Rx}): _TK1_to_RxRy, + frozenset({OpType.Rz, OpType.H}): _library._TK1_to_RzH, + frozenset({OpType.Rz, OpType.SX, OpType.X}): _TK1_to_X_SX_Rz, + frozenset({OpType.Rz, OpType.SX}): _TK1_to_X_SX_Rz, + frozenset({OpType.Rz, OpType.SX}): _library._TK1_to_RzSX, + frozenset({OpType.U3}): _TK1_to_U, +} + + +def get_TK1_decomposition_function( + gateset: Set[OpType], +) -> Callable[[Param, Param, Param], "Circuit"]: + """Return a function for generating TK1 equivalent circuits, which take the + three TK1 parameters as arguments and return a TK1 equivalent single qubit + circuit. If no such function is available, raise an error. + + :param gateset: Target gate set. + :type gateset: Set[OpType] + :raises NoAutoRebase: No suitable TK1 decomposition found. + :return: TK1 decomposition function. + :rtype: Callable[[Param, Param, Param], "Circuit"] + """ + subsets = [k for k in _TK1_circs if k.issubset(gateset)] + if subsets: + # find the largest available subset + # as in general more available gates leads to smaller circuits + matching = max(subsets, key=len) + return _TK1_circs[matching] + raise NoAutoRebase("No known decomposition from TK1 to available gateset.") + + +def auto_rebase_pass(gateset: Set[OpType]) -> RebaseCustom: + """Attempt to generate a rebase pass automatically for the given target + gateset. + + Checks if there are known existing decompositions from CX + to target gateset and TK1 to target gateset and uses those to construct a + custom rebase.
+ Raises an error if no known decompositions can be found, in which case try + using RebaseCustom with your own decompositions. + + :param gateset: Set of supported OpTypes, target gate set. + :type gateset: FrozenSet[OpType] + :raises NoAutoRebase: No suitable CX or TK1 decomposition found. + :return: Rebase pass. + :rtype: RebaseCustom + """ + return RebaseCustom( + gateset, get_cx_decomposition(gateset), get_TK1_decomposition_function(gateset) + ) + + +def auto_squash_pass(gateset: Set[OpType]) -> SquashCustom: + """Attempt to generate a squash pass automatically for the given target + single qubit gateset. + + :param gateset: Available single qubit gateset + :type gateset: Set[OpType] + :return: Squash to target gateset + :rtype: SquashCustom + """ + return SquashCustom(gateset, get_TK1_decomposition_function(gateset)) diff --git a/pytket/pytket/passes/script.py b/pytket/pytket/passes/script.py index 34d1d4fefd..ca459aaffa 100644 --- a/pytket/pytket/passes/script.py +++ b/pytket/pytket/passes/script.py @@ -35,15 +35,7 @@ PauliSimp, PauliSquash, PeepholeOptimise2Q, - RebaseCirq, - RebaseHQS, - RebaseProjectQ, - RebasePyZX, - RebaseQuil, RebaseTket, - RebaseUMD, - RebaseUFR, - RebaseOQC, RemoveBarriers, RemoveDiscarded, RemoveRedundancies, @@ -53,7 +45,6 @@ SynthesiseTket, SynthesiseOQC, SynthesiseUMD, - SquashHQS, ThreeQubitSquash, ) from pytket.transform import CXConfigType, PauliSynthStrat # type: ignore @@ -87,22 +78,13 @@ | pauli_squash | pauli_squash_default | peephole_optimise_2q - | rebase_cirq - | rebase_hqs - | rebase_oqc - | rebase_projectq - | rebase_pyzx - | rebase_quil | rebase_tket - | rebase_ufr - | rebase_umd | remove_barriers | remove_discarded | remove_redundancies | simplify_initial | simplify_initial_no_classical | simplify_measured - | squash_hqs | synthesise_hqs | synthesise_tket | synthesise_oqc @@ -137,22 +119,13 @@ pauli_squash: "PauliSquash" "(" pauli_synth_strat "," cx_config_type ")" pauli_squash_default: "PauliSquash" peephole_optimise_2q: "PeepholeOptimise2Q" -rebase_cirq: "RebaseCirq" -rebase_hqs: "RebaseHQS" -rebase_oqc: "RebaseOQC" -rebase_projectq: "RebaseProjectQ" -rebase_pyzx: "RebasePyZX" -rebase_quil: "RebaseQuil" rebase_tket: "RebaseTket" -rebase_ufr: "RebaseUFR" -rebase_umd: "RebaseUMD" remove_barriers: "RemoveBarriers" remove_discarded: "RemoveDiscarded" remove_redundancies: "RemoveRedundancies" simplify_initial: "SimplifyInitial" simplify_initial_no_classical: "SimplifyInitialNoClassical" simplify_measured: "SimplifyMeasured" -squash_hqs: "SquashHQS" synthesise_hqs: "SynthesiseHQS" synthesise_tket: "SynthesiseTket" synthesise_oqc: "SynthesiseOQC" @@ -283,33 +256,9 @@ def pauli_squash_default(self, t: List) -> BasePass: def peephole_optimise_2q(self, t: List) -> BasePass: return PeepholeOptimise2Q() - def rebase_cirq(self, t: List) -> BasePass: - return RebaseCirq() - - def rebase_hqs(self, t: List) -> BasePass: - return RebaseHQS() - - def rebase_oqc(self, t: List) -> BasePass: - return RebaseOQC() - - def rebase_projectq(self, t: List) -> BasePass: - return RebaseProjectQ() - - def rebase_pyzx(self, t: List) -> BasePass: - return RebasePyZX() - - def rebase_quil(self, t: List) -> BasePass: - return RebaseQuil() - def rebase_tket(self, t: List) -> BasePass: return RebaseTket() - def rebase_ufr(self, t: List) -> BasePass: - return RebaseUFR() - - def rebase_umd(self, t: List) -> BasePass: - return RebaseUMD() - def remove_barriers(self, t: List) -> BasePass: return RemoveBarriers() @@ -328,9 +277,6 @@ def simplify_initial_no_classical(self, t: 
List) -> BasePass: def simplify_measured(self, t: List) -> BasePass: return SimplifyMeasured() - def squash_hqs(self, t: List) -> BasePass: - return SquashHQS() - def synthesise_hqs(self, t: List) -> BasePass: return SynthesiseHQS() diff --git a/pytket/pytket/placement/__init__.py b/pytket/pytket/placement/__init__.py new file mode 100644 index 0000000000..4b7ae79613 --- /dev/null +++ b/pytket/pytket/placement/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The `placement` module provides an API to interact with the many + :py:class:`Placement` options, providing methods for relabelling + logical circuit qubit identifiers to physical architecture node identifiers, + for the purpose of compilation.""" + +from pytket._tket.placement import * # type: ignore diff --git a/pytket/pytket/qasm/qasm.py b/pytket/pytket/qasm/qasm.py index 1adc5856c3..8263d3eeac 100644 --- a/pytket/pytket/qasm/qasm.py +++ b/pytket/pytket/qasm/qasm.py @@ -75,8 +75,10 @@ "u2": OpType.U2, "u1": OpType.U1, "rx": OpType.Rx, + "rxx": OpType.XXPhase, "ry": OpType.Ry, "rz": OpType.Rz, + "rzz": OpType.ZZPhase, "Rz": OpType.Rz, "U1q": OpType.PhasedX, "crz": OpType.CRz, @@ -117,8 +119,10 @@ "u2", "u1", "rx", + "rxx", "ry", "rz", + "rzz", "crz", "crx", "cry", @@ -140,9 +144,9 @@ ), } included_gates["hqslib1"] = included_gates["qelib1"].copy() -included_gates["hqslib1"].update(("U1q", "rz", "ZZ")) +included_gates["hqslib1"].update(("U1q", "rz", "ZZ", "RZZ")) included_gates["hqslib1"].difference_update( - ("crx", "cry", "sx", "sxdg", "csx", "swap", "cswap") + ("crx", "cry", "sx", "sxdg", "csx", "swap", "cswap", "rzz") ) _tk_to_qasm_noparams = dict(((item[1], item[0]) for item in NOPARAM_COMMANDS.items())) _tk_to_qasm_noparams[OpType.CX] = "cx" # prefer "cx" to "CX" @@ -223,7 +227,7 @@ def parse_custom_gate(self, data: str) -> None: gatename, arg_list = signature.split(" ", 1) symbol_list = "" gatename = gatename.strip() - symbols = [sympify(s.strip()) for s in symbol_list.split(",")] + symbols = [sympify(s.strip()) for s in symbol_list.split(",")] # type: ignore args = [a.strip() for a in arg_list.split(",")] rename_map = {} qb_map = {} @@ -334,7 +338,7 @@ def parse_instruction( halfturn_angles = [] for ang in angles: try: - halfturns = sympify(ang) / pi + halfturns = sympify(ang) / pi # type: ignore halfturn_angles.append(halfturns) except: raise QASMParseError("Cannot parse angle: {}".format(ang)) @@ -576,7 +580,11 @@ def circuit_to_qasm_io( f"{args[-1]} = {args[0]} {_classical_gatestr_map[opstr]} {args[1]};\n" ) continue - if optype in _tk_to_qasm_noparams: + if header == "hqslib1" and optype == OpType.ZZPhase: + # special handling for zzphase + opstr = "RZZ" + params = op.params + elif optype in _tk_to_qasm_noparams: opstr = _tk_to_qasm_noparams[optype] elif optype in _tk_to_qasm_params: opstr = _tk_to_qasm_params[optype] diff --git a/pytket/pytket/telemetry/__init__.py b/pytket/pytket/telemetry/__init__.py deleted file mode 100644 index 
e1486e4848..0000000000 --- a/pytket/pytket/telemetry/__init__.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2019-2022 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""The telemetry module defines how pytket installations can register themselves.""" -from pathlib import Path -from urllib import request -from urllib.error import HTTPError, URLError -from urllib.request import Request -import json -from logging import getLogger -import os -import platform -import sys - -from pytket.config import PytketConfig, get_config_file_path - -logger = getLogger(__name__) - - -def _register(pytket_config_file: Path) -> None: - pytket_version: str - try: - # For python 3.8 onwards - from importlib.metadata import version # type: ignore - - pytket_version = version("pytket") - except ImportError: - import pkg_resources - - pytket_version = pkg_resources.get_distribution("pytket").version - - data = { - "version": pytket_version, - "machine": platform.machine(), - "processor": platform.processor(), - "python_implementation": platform.python_implementation(), - "python_version": platform.python_version(), - "system": platform.system(), - "system_version": platform.version(), - "system_release": platform.release(), - "c_api_version": sys.api_version, - } - - headers = {"Content-Type": "application/json"} - json_data = json.dumps(data).encode("utf8") - - try: - resp = request.urlopen( - Request( - "https://telemetry.cambridgequantum.com/v3/register", - json_data, - headers, - ), - timeout=10, - ) - if resp.status != 200: - logger.error( - "failed to register pytket with http status code: %s", resp.status - ) - else: - resp_body = json.loads(resp.read(64).decode("utf-8")) - telemetry_id = resp_body["telemetry_id"] - - telemetry_config = PytketConfig.read_file(pytket_config_file) - telemetry_config.telemetry_id = telemetry_id - telemetry_config.write_file(pytket_config_file) - - except (URLError, HTTPError) as err: - logger.error("failed to register pytket with exception: %s", err) - - -def _set_telemetry_preference(enabled: bool) -> None: - pytket_config_file = get_config_file_path() - if not pytket_config_file.exists(): - default_config = PytketConfig.default() - default_config.write_file(pytket_config_file) - - telemetry_config = PytketConfig.read_file(pytket_config_file) - telemetry_config.enable_telemetry = enabled - telemetry_config.write_file(pytket_config_file) - - -def opt_in() -> None: - """Opt into pytket telemetry""" - _set_telemetry_preference(True) - print("Successfully opted into telemetry") - - -def opt_out() -> None: - """Opt out of pytket telemetry""" - _set_telemetry_preference(False) - print("Successfully opted out of telemetry") - - -def _on_module_load() -> None: - config: PytketConfig - pytket_config_file = get_config_file_path() - if pytket_config_file.exists(): - config = PytketConfig.read_file(pytket_config_file) - else: - config = PytketConfig.default() - config.write_file(pytket_config_file) - - if config.enable_telemetry and config.telemetry_id is 
None: - _register(pytket_config_file) - - -_on_module_load() diff --git a/pytket/pytket/utils/operators.py b/pytket/pytket/utils/operators.py index 31c5b37001..52a494e781 100644 --- a/pytket/pytket/utils/operators.py +++ b/pytket/pytket/utils/operators.py @@ -16,17 +16,10 @@ from typing import cast, Dict, TYPE_CHECKING, Union, List, Optional, Set, Any import numpy as np from sympy import Symbol, sympify, Expr, re, im # type: ignore -from pytket.pauli import Pauli, QubitPauliString, pauli_string_mult # type: ignore +from pytket.pauli import QubitPauliString, pauli_string_mult # type: ignore from pytket.circuit import Qubit # type: ignore from pytket.utils.serialization import complex_to_list, list_to_complex # type: ignore -_of_installed = True - -try: - from openfermion import QubitOperator # type: ignore -except ImportError: - _of_installed = False - CoeffType = Union[int, float, complex, Expr] @@ -65,7 +58,7 @@ def __init__( self._dict = dict() if dictionary: self._dict = dict( - (key, sympify(value)) for key, value in dictionary.items() + (key, sympify(value)) for key, value in dictionary.items() # type: ignore ) self._collect_qubits() @@ -87,7 +80,7 @@ def __setitem__(self, key: QubitPauliString, value: CoeffType) -> None: :param value: Associated coefficient :type value: Union[int, float, complex, Expr] """ - self._dict[key] = sympify(value) + self._dict[key] = sympify(value) # type: ignore def __getstate__(self) -> dict: return self._dict @@ -250,47 +243,6 @@ def get_coeff(obj: Dict[str, Any]) -> complex: return QubitPauliOperator({get_qps(obj): get_coeff(obj) for obj in pauli_list}) - def to_OpenFermion(self) -> "QubitOperator": # type: ignore - """Convert pytket QubitPauliOperator to OpenFermion QubitOperator.""" - if not _of_installed: - raise ImportError("Install OpenFermion to use QubitOperator converters") - op = QubitOperator() - for key, value in self._dict.items(): - qubit_string = "" - for qubit, tket_pauli in key.map.items(): - if qubit.reg_name != "q": - raise ValueError("Qubit register must have default name.") - index = qubit.index - if len(index) != 1: - raise ValueError("Qubit register must be 1-dimensional.") - if tket_pauli != Pauli.I: - pauli = tket_pauli.name - qubit_string += pauli + str(index[0]) + " " - try: - coeff = complex(value) - except TypeError: - raise ValueError("QubitPauliOperator contains unevaluated symbols.") - op += QubitOperator(qubit_string, coeff) - return op - - @classmethod - def from_OpenFermion( - cls, openf_op: "QubitOperator" - ) -> "QubitPauliOperator": # type: ignore - """Convert OpenFermion QubitOperator to pytket QubitPauliOperator.""" - tk_op = dict() - if not _of_installed: - raise ImportError("Install OpenFermion to use QubitOperator converters") - for term, coeff in openf_op.terms.items(): - string = QubitPauliString( - { - Qubit(qubitnum): _STRING_TO_PAULI[paulisym] - for qubitnum, paulisym in term - } - ) - tk_op[string] = coeff - return cls(tk_op) - def to_sparse_matrix( self, qubits: Union[List[Qubit], int, None] = None ) -> "csc_matrix": @@ -411,6 +363,3 @@ def _collect_qubits(self) -> None: for key in self._dict.keys(): for q in key.map.keys(): self._all_qubits.add(q) - - -_STRING_TO_PAULI = {"I": Pauli.I, "X": Pauli.X, "Y": Pauli.Y, "Z": Pauli.Z} diff --git a/pytket/pytket/utils/symbolic.py b/pytket/pytket/utils/symbolic.py index ef3012d341..d5ab58f3a3 100644 --- a/pytket/pytket/utils/symbolic.py +++ b/pytket/pytket/utils/symbolic.py @@ -20,11 +20,11 @@ import numpy as np import sympy # type: ignore -from sympy import ( 
+from sympy import ( # type: ignore BlockDiagMatrix, BlockMatrix, Expr, - Identity, # type: ignore + Identity, ImmutableMatrix, Matrix, Mul, @@ -64,11 +64,11 @@ def symb_controlled(target: SymGateFunc) -> SymGateFunc: - return lambda x: ImmutableMatrix(BlockDiagMatrix(Identity(2), target(x))) + return lambda x: ImmutableMatrix(BlockDiagMatrix(Identity(2), target(x))) # type: ignore def symb_rz(params: ParamsType) -> ImmutableMatrix: - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [sympy.exp(-I * (sympy.pi / 2) * params[0]), 0], [0, sympy.exp(I * (sympy.pi / 2) * params[0])], @@ -79,7 +79,7 @@ def symb_rz(params: ParamsType) -> ImmutableMatrix: def symb_rx(params: ParamsType) -> ImmutableMatrix: costerm = sympy.cos((sympy.pi / 2) * params[0]) sinterm = -I * sympy.sin((sympy.pi / 2) * params[0]) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [costerm, sinterm], [sinterm, costerm], @@ -90,7 +90,7 @@ def symb_rx(params: ParamsType) -> ImmutableMatrix: def symb_ry(params: ParamsType) -> ImmutableMatrix: costerm = sympy.cos((sympy.pi / 2) * params[0]) sinterm = sympy.sin((sympy.pi / 2) * params[0]) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [costerm, -sinterm], [sinterm, costerm], @@ -102,7 +102,7 @@ def symb_u3(params: ParamsType) -> ImmutableMatrix: theta, phi, lam = params costerm = sympy.cos((sympy.pi / 2) * theta) sinterm = sympy.sin((sympy.pi / 2) * theta) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [costerm, -sinterm * sympy.exp(I * sympy.pi * lam)], [ @@ -114,22 +114,22 @@ def symb_u3(params: ParamsType) -> ImmutableMatrix: def symb_u2(params: ParamsType) -> ImmutableMatrix: - return symb_u3([0.5] + params) + return symb_u3([0.5] + params) # type: ignore def symb_u1(params: ParamsType) -> ImmutableMatrix: - return symb_u3([0.0, 0.0] + params) + return symb_u3([0.0, 0.0] + params) # type: ignore def symb_tk1(params: ParamsType) -> ImmutableMatrix: - return symb_rz([params[0]]) * symb_rx([params[1]]) * symb_rz([params[2]]) + return symb_rz([params[0]]) * symb_rx([params[1]]) * symb_rz([params[2]]) # type: ignore def symb_iswap(params: ParamsType) -> ImmutableMatrix: alpha = params[0] costerm = sympy.cos((sympy.pi / 2) * alpha) sinterm = sympy.sin((sympy.pi / 2) * alpha) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [1, 0, 0, 0], [0, costerm, I * sinterm, 0], @@ -144,7 +144,7 @@ def symb_phasediswap(params: ParamsType) -> ImmutableMatrix: costerm = sympy.cos((sympy.pi / 2) * alpha) sinterm = I * sympy.sin((sympy.pi / 2) * alpha) phase = sympy.exp(2 * I * sympy.pi * p) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [1, 0, 0, 0], [0, costerm, sinterm * phase, 0], @@ -158,7 +158,7 @@ def symb_xxphase(params: ParamsType) -> ImmutableMatrix: alpha = params[0] c = sympy.cos((sympy.pi / 2) * alpha) s = -I * sympy.sin((sympy.pi / 2) * alpha) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [c, 0, 0, s], [0, c, s, 0], @@ -172,7 +172,7 @@ def symb_yyphase(params: ParamsType) -> ImmutableMatrix: alpha = params[0] c = sympy.cos((sympy.pi / 2) * alpha) s = I * sympy.sin((sympy.pi / 2) * alpha) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [c, 0, 0, s], [0, c, -s, 0], @@ -185,31 +185,31 @@ def symb_yyphase(params: ParamsType) -> ImmutableMatrix: def symb_zzphase(params: ParamsType) -> ImmutableMatrix: alpha = params[0] t = sympy.exp(I * (sympy.pi / 2) * alpha) - return ImmutableMatrix(diag(1 / t, t, t, 1 / t)) + return 
ImmutableMatrix(diag(1 / t, t, t, 1 / t)) # type: ignore def symb_xxphase3(params: ParamsType) -> ImmutableMatrix: xxphase2 = symb_xxphase(params) - res1 = matrix_tensor_product(xxphase2, eye(2)) + res1 = matrix_tensor_product(xxphase2, eye(2)) # type: ignore res2 = Matrix( - BlockMatrix( + BlockMatrix( # type: ignore [ - [xxphase2[:2, :2], zeros(2), xxphase2[:2, 2:], zeros(2)], - [zeros(2), xxphase2[:2, :2], zeros(2), xxphase2[:2, 2:]], - [xxphase2[2:, :2], zeros(2), xxphase2[2:, 2:], zeros(2)], - [zeros(2), xxphase2[2:, :2], zeros(2), xxphase2[2:, 2:]], + [xxphase2[:2, :2], zeros(2), xxphase2[:2, 2:], zeros(2)], # type: ignore + [zeros(2), xxphase2[:2, :2], zeros(2), xxphase2[:2, 2:]], # type: ignore + [xxphase2[2:, :2], zeros(2), xxphase2[2:, 2:], zeros(2)], # type: ignore + [zeros(2), xxphase2[2:, :2], zeros(2), xxphase2[2:, 2:]], # type: ignore ] ) ) - res3 = matrix_tensor_product(eye(2), xxphase2) - res = ImmutableMatrix(res1 * res2 * res3) + res3 = matrix_tensor_product(eye(2), xxphase2) # type: ignore + res = ImmutableMatrix(res1 * res2 * res3) # type: ignore return res def symb_phasedx(params: ParamsType) -> ImmutableMatrix: alpha, beta = params - return symb_rz([beta]) * symb_rx([alpha]) * symb_rz([-beta]) + return symb_rz([beta]) * symb_rx([alpha]) * symb_rz([-beta]) # type: ignore def symb_eswap(params: ParamsType) -> ImmutableMatrix: @@ -218,7 +218,7 @@ def symb_eswap(params: ParamsType) -> ImmutableMatrix: s = -I * sympy.sin((sympy.pi / 2) * alpha) t = sympy.exp(-I * (sympy.pi / 2) * alpha) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [t, 0, 0, 0], [0, c, s, 0], @@ -234,7 +234,7 @@ def symb_fsim(params: ParamsType) -> ImmutableMatrix: s = -I * sympy.sin(sympy.pi * alpha) t = sympy.exp(-I * sympy.pi * beta) - return ImmutableMatrix( + return ImmutableMatrix( # type: ignore [ [1, 0, 0, 0], [0, c, s, 0], @@ -319,7 +319,7 @@ def _op_to_sympy_gate(op: Op, targets: List[int]) -> symgate.Gate: else: try: # use internal tket unitary definition - u_mat = ImmutableMatrix(op.get_unitary()) + u_mat = ImmutableMatrix(op.get_unitary()) # type: ignore except RuntimeError as e: # to catch tket failure to get Op unitary # most likely due to symbolic parameters. @@ -328,7 +328,7 @@ def _op_to_sympy_gate(op: Op, targets: List[int]) -> symgate.Gate: " Try registering your own symbolic matrix representation" " with SymGateRegister.func." ) from e - gate = symgate.UGate(targets, u_mat) + gate = symgate.UGate(targets, u_mat) # type: ignore return gate @@ -341,7 +341,7 @@ def circuit_to_symbolic_gates(circ: Circuit) -> Mul: :return: Symbolic gate multiplication expression. 
:rtype: Mul """ - outmat = symgate.IdentityGate(0) + outmat = symgate.IdentityGate(0) # type: ignore nqb = circ.n_qubits qubit_map = {qb: nqb - 1 - i for i, qb in enumerate(circ.qubits)} for com in circ: @@ -360,9 +360,9 @@ def circuit_to_symbolic_gates(circ: Circuit) -> Mul: outmat = gate * outmat for i in range(len(qubit_map)): - outmat = symgate.IdentityGate(i) * outmat + outmat = symgate.IdentityGate(i) * outmat # type: ignore - return outmat * sympy.exp((circ.phase * sympy.pi * I)) + return outmat * sympy.exp((circ.phase * sympy.pi * I)) # type: ignore def circuit_to_symbolic_unitary(circ: Circuit) -> ImmutableMatrix: @@ -378,18 +378,18 @@ def circuit_to_symbolic_unitary(circ: Circuit) -> ImmutableMatrix: gates = circuit_to_symbolic_gates(circ) nqb = circ.n_qubits try: - return cast(ImmutableMatrix, represent(gates, nqubits=circ.n_qubits)) + return cast(ImmutableMatrix, represent(gates, nqubits=circ.n_qubits)) # type: ignore except NotImplementedError: # sympy can't represent n>1 qubit unitaries very well # so if it fails we will just calculate columns using the statevectors # for all possible input basis states matrix_dim = 1 << nqb - input_states = (Qubit(f"{i:0{nqb}b}") for i in range(matrix_dim)) - outmat = Matrix([]) + input_states = (Qubit(f"{i:0{nqb}b}") for i in range(matrix_dim)) # type: ignore + outmat = Matrix([]) # type: ignore for col, input_state in enumerate(input_states): - outmat = outmat.col_insert(col, represent(qapply(gates * input_state))) + outmat = outmat.col_insert(col, represent(qapply(gates * input_state))) # type: ignore - return ImmutableMatrix(outmat) + return ImmutableMatrix(outmat) # type: ignore def circuit_apply_symbolic_qubit(circ: Circuit, input_qb: Expr) -> Qubit: @@ -404,7 +404,7 @@ def circuit_apply_symbolic_qubit(circ: Circuit, input_qb: Expr) -> Qubit: """ gates = circuit_to_symbolic_gates(circ) - return cast(Qubit, qapply(gates * input_qb)) + return cast(Qubit, qapply(gates * input_qb)) # type: ignore def circuit_apply_symbolic_statevector( @@ -423,10 +423,10 @@ def circuit_apply_symbolic_statevector( :rtype: ImmutableMatrix """ if input_state: - input_qb = matrix_to_qubit(input_state) + input_qb = matrix_to_qubit(input_state) # type: ignore else: - input_qb = Qubit("0" * circ.n_qubits) + input_qb = Qubit("0" * circ.n_qubits) # type: ignore return cast( ImmutableMatrix, - represent(circuit_apply_symbolic_qubit(circ, cast(Qubit, input_qb))), + represent(circuit_apply_symbolic_qubit(circ, cast(Qubit, input_qb))), # type: ignore ) diff --git a/pytket/pytket/zx/tensor_eval.py b/pytket/pytket/zx/tensor_eval.py index a922ed8e2f..a9071aa726 100644 --- a/pytket/pytket/zx/tensor_eval.py +++ b/pytket/pytket/zx/tensor_eval.py @@ -17,10 +17,10 @@ from typing import Dict, List, Any from math import floor, pi, sqrt import numpy as np -from pytket.zx import ZXDiagram, ZXType, ZXVert, BasicGen, QuantumType, Rewrite # type: ignore +from pytket.zx import ZXDiagram, ZXType, ZXVert, PhasedGen, QuantumType, Rewrite # type: ignore -def _spider_to_tensor(gen: BasicGen, rank: int) -> np.ndarray: +def _spider_to_tensor(gen: PhasedGen, rank: int) -> np.ndarray: try: if gen.type == ZXType.Hbox: param_c = complex(gen.param) diff --git a/pytket/setup.py b/pytket/setup.py index 4a55c340dd..4d8112395e 100755 --- a/pytket/setup.py +++ b/pytket/setup.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io import os import platform import re @@ -22,11 +21,12 @@ import shutil from multiprocessing import cpu_count from distutils.version import LooseVersion +from concurrent.futures import ThreadPoolExecutor as Pool +from shutil import which import setuptools # type: ignore from setuptools import setup, Extension from setuptools.command.build_ext import build_ext # type: ignore -from concurrent.futures import ThreadPoolExecutor as Pool -from shutil import which +from wheel.bdist_wheel import bdist_wheel as _bdist_wheel class CMakeExtension(Extension): @@ -122,10 +122,11 @@ def run(self): "tket-Architecture", "tket-Simulation", "tket-Diagonalisation", - "tket-Program", "tket-Characterisation", "tket-Converters", - "tket-Routing", + "tket-TokenSwapping", + "tket-Placement", + "tket-Mapping", "tket-MeasurementSetup", "tket-Transformations", "tket-ArchAwareSynth", @@ -209,14 +210,27 @@ def build_extension(self, ext): "predicates", "partition", "pauli", - "program", - "routing", + "mapping", "transform", "tailoring", "tableau", "zx", + "placement", + "architecture", ] +setup_dir = os.path.abspath(os.path.dirname(__file__)) +plat_name = os.getenv("WHEEL_PLAT_NAME") + + +class bdist_wheel(_bdist_wheel): + def finalize_options(self): + _bdist_wheel.finalize_options(self) + if plat_name is not None: + print(f"Overriding plat_name to {plat_name}") + self.plat_name = plat_name + self.plat_name_supplied = True + setup( name="pytket", @@ -224,7 +238,8 @@ def build_extension(self, ext): author_email="seyon.sivarajah@cambridgequantum.com", python_requires=">=3.8", url="https://cqcl.github.io/pytket", - description="Python module for interfacing with the CQC tket library of quantum software", + description="Python module for interfacing with the CQC tket library of quantum " + "software", license="Apache 2", packages=setuptools.find_packages(), install_requires=[ @@ -241,9 +256,7 @@ def build_extension(self, ext): ext_modules=[ CMakeExtension("pytket._tket.{}".format(binder)) for binder in binders ], - cmdclass={ - "build_ext": CMakeBuild, - }, + cmdclass={"build_ext": CMakeBuild, "bdist_wheel": bdist_wheel}, classifiers=[ "Environment :: Console", "Programming Language :: Python :: 3.8", @@ -260,4 +273,9 @@ def build_extension(self, ext): include_package_data=True, package_data={"pytket": ["py.typed"]}, zip_safe=False, + use_scm_version={ + "root": os.path.dirname(setup_dir), + "write_to": os.path.join(setup_dir, "pytket", "_version.py"), + "write_to_template": "__version__ = '{version}'", + }, ) diff --git a/pytket/tests/architecture_aware_synthesis_test.py b/pytket/tests/architecture_aware_synthesis_test.py new file mode 100644 index 0000000000..62c0540f2f --- /dev/null +++ b/pytket/tests/architecture_aware_synthesis_test.py @@ -0,0 +1,221 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
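+
+# These tests exercise the AASRouting pass on small line architectures, covering
+# the default settings, explicit lookahead values, each CNotSynthType (SWAP,
+# HamPath, Rec) and a non-contiguous architecture phase-polynomial case.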
+ +from pytket.circuit import Circuit, OpType # type: ignore +from pytket.architecture import Architecture # type: ignore +from pytket.passes import AASRouting, CNotSynthType # type: ignore +from pytket.predicates import CompilationUnit # type: ignore + + +def test_AAS() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_2() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc) + assert pass1.apply(circ) + + +def test_AAS_3() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_4() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc) + assert pass1.apply(circ) + + +def test_AAS_5() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_6() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc) + assert pass1.apply(circ) + + +def test_AAS_7() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.H(0).H(2) + circ.CX(0, 1).CX(1, 2).CX(3, 4) + circ.Rz(0, 1) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_8() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ = Circuit(5) + circ.CX(0, 1) + circ.H(0) + circ.Z(1) + circ.CX(0, 3) + circ.Rx(1.5, 3) + circ.CX(2, 4) + circ.X(2) + circ.CX(1, 4) + circ.CX(0, 4) + pass1 = AASRouting(arc, lookahead=2) + assert pass1.apply(circ) + + +def test_AAS_9() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8]]) + circ = Circuit(9) + circ.CX(0, 8).CX(8, 1).CX(1, 7).CX(7, 2).CX(2, 6).CX(6, 3).CX(3, 5).CX(5, 4) + circ.Rz(0.5, 4) + pass1 = AASRouting(arc, lookahead=2) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() < 56 + + +def test_AAS_10() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=2) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() < 33 + + +def test_AAS_11() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.SWAP) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 119 + + +def test_AAS_12() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], 
[3, 4], [4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.HamPath) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 36 + + +def test_AAS_13() -> None: + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) + circ = Circuit(7) + circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) + circ.Rz(0.5, 3) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 28 + + +def test_AAS_14() -> None: + arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) + circ = Circuit(3).CZ(0, 1) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 3 + + +def test_AAS_15() -> None: + arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) + circ = Circuit(2).CZ(0, 1) + pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) + cu = CompilationUnit(circ) + assert pass1.apply(cu) + out_circ = cu.circuit + assert out_circ.valid_connectivity(arc, False, True) + assert out_circ.depth() == 3 + + +def test_noncontiguous_arc_phase_poly() -> None: + # testing non-contiguous ascending named nodes + arc = Architecture([[0, 2]]) + pass1 = AASRouting(arc, lookahead=1) + c = Circuit(2).H(0).H(1) + pass1.apply(c) + assert c.n_gates_of_type(OpType.H) == 2 + assert c.n_gates_of_type(OpType.CX) == 0 + assert c.n_gates_of_type(OpType.CX) == 0 + + +if __name__ == "__main__": + test_AAS() + test_AAS_2() + test_AAS_3() + test_AAS_4() + test_AAS_5() + test_AAS_6() + test_AAS_7() + test_AAS_8() + test_AAS_9() + test_AAS_10() + test_AAS_11() + test_AAS_12() + test_AAS_13() + test_AAS_14() + test_AAS_15() + test_noncontiguous_arc_phase_poly() diff --git a/pytket/tests/architecture_test.py b/pytket/tests/architecture_test.py new file mode 100644 index 0000000000..d41740554b --- /dev/null +++ b/pytket/tests/architecture_test.py @@ -0,0 +1,103 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
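+
+# These tests cover Architecture construction from integer and Node couplings,
+# equality semantics, the FullyConnected and SquareGrid classes (including
+# FullyConnected dict round-tripping) and Architecture.valid_operation checks.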
+ +from pytket.circuit import Node, Op, OpType, Circuit, Qubit, PhasePolyBox # type: ignore +from pytket.architecture import Architecture, SquareGrid, FullyConnected # type: ignore +import numpy as np + + +def test_architectures() -> None: + basic_index_coupling = [(0, 1), (2, 1), (2, 3), (4, 3)] + basic_index_architecture = Architecture(basic_index_coupling) + basic_index_coupling_convert = [ + (Node(0), Node(1)), + (Node(2), Node(1)), + (Node(2), Node(3)), + (Node(4), Node(3)), + ] + assert basic_index_architecture.coupling == basic_index_coupling_convert + + node_0 = Node("example_register", 0) + node_1 = Node("example_register", 1) + node_2 = Node("example_register", 2) + node_3 = Node("example_register", 3) + basic_uid_coupling = [(node_0, node_1), (node_1, node_2), (node_2, node_3)] + basic_uid_architecture = Architecture(basic_uid_coupling) + assert basic_uid_architecture.coupling == basic_uid_coupling + + square_arc = SquareGrid(2, 2, 2) + assert square_arc.nodes[0] == Node("gridNode", [0, 0, 0]) + assert square_arc.coupling[0] == ( + Node("gridNode", [0, 0, 0]), + Node("gridNode", [0, 1, 0]), + ) + + +def test_architecture_eq() -> None: + coupling = [(1, 2), (3, 4), (0, 6), (0, 3)] + arc = Architecture(coupling) + + assert arc != Architecture([]) + assert arc == Architecture(coupling) + assert arc == Architecture([(Node(i), Node(j)) for (i, j) in coupling]) + assert arc != Architecture([(Node("s", i), Node("s", j)) for (i, j) in coupling]) + + # only Node IDs and coupling matters + g00, g01, g10, g11 = [ + Node("gridNode", [i, j, 0]) for i in range(2) for j in range(2) + ] + sq_arc = Architecture([(g00, g01), (g01, g11), (g00, g10), (g10, g11)]) + assert sq_arc == SquareGrid(2, 2) + assert sq_arc != Architecture([(g00, g01), (g01, g11), (g00, g10)]) + + +def test_fully_connected() -> None: + fc = FullyConnected(3) + assert fc.nodes == [Node("fcNode", i) for i in range(3)] + d = fc.to_dict() + fc1 = FullyConnected.from_dict(d) + assert fc == fc1 + + +def test_arch_types() -> None: + arch = Architecture([(0, 1)]) + assert isinstance(arch, Architecture) + fc = FullyConnected(2) + assert isinstance(fc, FullyConnected) + sg = SquareGrid(2, 2, 2) + assert isinstance(sg, SquareGrid) + + +def test_valid_operation() -> None: + edges = [(0, 1), (1, 2), (2, 0), (0, 3), (3, 4), (4, 5), (5, 6)] + arc = Architecture(edges) + + assert not arc.valid_operation([Node(1), Node(3)]) + assert arc.valid_operation([Node(0)]) + assert arc.valid_operation([Node(0), Node(1)]) + assert not arc.valid_operation([Node(0), Node(1), Node(2)]) + assert not arc.valid_operation([Node(10)]) + assert not arc.valid_operation([Node(10), Node(11), Node(15)]) + assert not arc.valid_operation([Node(0), Node(1), Node(2), Node(3)]) + assert not arc.valid_operation([Node(0), Node(4)]) + assert not arc.valid_operation([Node(0), Node(1), Node(2)]) + assert not arc.valid_operation([Node(0), Node(1), Node(4)]) + + +if __name__ == "__main__": + test_architectures() + test_architecture_eq() + test_fully_connected() + test_arch_types() + test_valid_operation() diff --git a/pytket/tests/backend_test.py b/pytket/tests/backend_test.py index cda582af43..f7c1213194 100644 --- a/pytket/tests/backend_test.py +++ b/pytket/tests/backend_test.py @@ -24,7 +24,8 @@ from pytket.circuit import Circuit, OpType, BasisOrder, Qubit, Bit, Node # type: ignore from pytket.predicates import CompilationUnit # type: ignore from pytket.passes import PauliSimp, CliffordSimp, ContextSimp # type: ignore -from pytket.routing import Architecture, route # type: 
ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod, LexiLabellingMethod # type: ignore +from pytket.architecture import Architecture # type: ignore from pytket.utils.outcomearray import OutcomeArray, readout_counts from pytket.utils.prepare import prepare_circuit from pytket.backends import CircuitNotValidError @@ -400,7 +401,7 @@ def test_empty_result(n_shots, n_bits) -> None: message=strategies.text(), ) @settings(deadline=None) -def test_status_serialization(status: StatusEnum, message: str) -> None: +def test_status_serialization_basic(status: StatusEnum, message: str) -> None: c_stat = CircuitStatus(status, message) assert CircuitStatus.from_dict(c_stat.to_dict()) == c_stat with pytest.raises(ValueError) as errorinfo: @@ -408,6 +409,14 @@ def test_status_serialization(status: StatusEnum, message: str) -> None: assert "invalid format" in str(errorinfo.value) +@given( + c_stat=strategies.builds(CircuitStatus), +) +@settings(deadline=None) +def test_status_serialization(c_stat: CircuitStatus) -> None: + assert CircuitStatus.from_dict(c_stat.to_dict()) == c_stat + + def test_shots_with_unmeasured() -> None: # TKET-1193 b = TketSimShotBackend() @@ -524,7 +533,10 @@ def test_postprocess_3() -> None: qbs = [Node("qn", i) for i in range(4)] arc = Architecture([[qbs[i], qbs[i + 1]] for i in range(3)]) c = Circuit(3, 3).H(0).CX(0, 2).measure_all() - rc = route(c, arc) + + mm = MappingManager(arc) + rc = c.copy() + mm.route_circuit(rc, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) n_shots = 100 h = b.process_circuit(b.get_compiled_circuit(c), n_shots=n_shots, postprocess=True) r = b.get_result(h) diff --git a/pytket/tests/backendinfo_test.py b/pytket/tests/backendinfo_test.py index 494cec7f39..74ea91f4d8 100644 --- a/pytket/tests/backendinfo_test.py +++ b/pytket/tests/backendinfo_test.py @@ -23,7 +23,7 @@ import pytest # type: ignore from pytket.backends.backendinfo import BackendInfo, fully_connected_backendinfo -from pytket.routing import SquareGrid, FullyConnected # type: ignore +from pytket.architecture import SquareGrid, FullyConnected # type: ignore from pytket.circuit import OpType, Node # type: ignore import strategies as st # type: ignore @@ -124,14 +124,21 @@ def test_serialization() -> None: def test_to_json() -> None: bi = BackendInfo( - "name", - "device_name", - "version", - SquareGrid(3, 4), - {OpType.CX, OpType.Rx}, - True, - True, - True, + name="name", + device_name="device_name", + version="version", + architecture=SquareGrid(3, 4), + gate_set={OpType.CX, OpType.Rx}, + supports_fast_feedforward=True, + supports_reset=True, + supports_midcircuit_measurement=True, + all_node_gate_errors={Node(0): {OpType.Rx: 0.1}}, + all_edge_gate_errors={(Node(0), Node(1)): {OpType.Rx: 0.1}}, + all_readout_errors={Node(0): [[0.1]]}, + averaged_node_gate_errors={Node(0): 0.1}, + averaged_edge_gate_errors={(Node(0), Node(1)): 0.1}, + averaged_readout_errors={Node(0): 0.1}, + misc={"region": "UK"}, ) bi_dict = bi.to_dict() json_bi = dumps(bi_dict) diff --git a/pytket/tests/circuit_test.py b/pytket/tests/circuit_test.py index 9a8b6d6a47..93fd45814a 100644 --- a/pytket/tests/circuit_test.py +++ b/pytket/tests/circuit_test.py @@ -33,6 +33,8 @@ CustomGateDef, Qubit, Bit, + BitRegister, + QubitRegister, ) from pytket.circuit.display import render_circuit_as_html @@ -64,7 +66,7 @@ def test_op_free_symbols() -> None: c.add_barrier([0, 1]) op = c.get_commands()[0].op assert op.free_symbols() == set() - alpha = Symbol("alpha") + alpha = Symbol("alpha") # type: ignore c.Rx(alpha, 
0) op = c.get_commands()[1].op assert op.free_symbols() == {alpha} @@ -207,7 +209,7 @@ def test_circuit_gen_ids() -> None: def test_symbolic_ops() -> None: c = Circuit(2) - alpha = Symbol("alpha") + alpha = Symbol("alpha") # type: ignore c.Rx(alpha, 0) beta = fresh_symbol("alpha") c.CRz(beta * 2, 1, 0) @@ -241,7 +243,7 @@ def get_type_tree(expr: sympy.Expr) -> str: tree_str = str(type(expr)).rsplit(".", 1)[-1].split("'")[0] if len(expr.args) != 0: tree_str += " (" - tree_str += ", ".join([get_type_tree(a) for a in expr.args]) + tree_str += ", ".join([get_type_tree(a) for a in expr.args]) # type: ignore tree_str += ")" return tree_str @@ -257,7 +259,7 @@ def get_type_tree(expr: sympy.Expr) -> str: } for expr_string, type_tree in test_dict.items(): c = Circuit(1) - c.Rz(sympify(expr_string), 0) + c.Rz(sympify(expr_string), 0) # type: ignore com = c.get_commands()[0] assert get_type_tree(com.op.params[0]) == type_tree @@ -378,7 +380,7 @@ def test_boxes() -> None: d.add_expbox(ebox, 3, 2) paulis = [Pauli.X, Pauli.Z, Pauli.X] - pbox = PauliExpBox(paulis, Symbol("alpha")) + pbox = PauliExpBox(paulis, Symbol("alpha")) # type: ignore assert pbox.type == OpType.PauliExpBox d.add_pauliexpbox(pbox, [3, 2, 1]) @@ -393,16 +395,39 @@ def test_boxes() -> None: pauli_exps = [cmd.op for cmd in d if cmd.op.type == OpType.PauliExpBox] assert len(pauli_exps) == 1 assert pauli_exps[0].get_paulis() == paulis - assert pauli_exps[0].get_phase() == Symbol("alpha") + assert pauli_exps[0].get_phase() == Symbol("alpha") # type: ignore boxes = (cbox, mbox, u2qbox, u3qbox, ebox, pbox, qcbox) assert all(box == box for box in boxes) assert all(isinstance(box, Op) for box in boxes) +def test_u1q_stability() -> None: + # https://github.com/CQCL/tket/issues/222 + u = np.array( + [ + [ + -1.0000000000000000e00 + 0.0000000000000000e00j, + -4.7624091282918654e-10 + 2.0295010872500105e-16j, + ], + [ + 4.5447577055178555e-10 - 1.4232772405184710e-10j, + -9.5429791447115209e-01 + 2.9885697320961047e-01j, + ], + ] + ) + ubox = Unitary1qBox(u) + op = ubox.get_circuit().get_commands()[0].op + assert op.type == OpType.TK1 + a, b, c = op.params + assert np.isfinite(a) + assert np.isfinite(b) + assert np.isfinite(c) + + def test_custom_gates() -> None: - a = Symbol("a") - b = Symbol("b") + a = Symbol("a") # type: ignore + b = Symbol("b") # type: ignore setup = Circuit(3) setup.CX(0, 1) setup.Rz(a + 0.5, 2) @@ -423,7 +448,7 @@ def test_custom_gates() -> None: def test_errors() -> None: # TKET-289 c = Circuit(1) - a = Symbol("a") + a = Symbol("a") # type: ignore c.Rz(a, 0) c.Rz(0.5, 0) c.Rz(0, 0) @@ -653,13 +678,16 @@ def test_opgroups() -> None: # Remove a redundant gate c = Circuit(3).H(0) + assert len(c.opgroups) == 0 c.CX(0, 1, opgroup="cx0") c.CX(1, 2, opgroup="cx1") c.CX(2, 0, opgroup="cx2") c.CX(0, 1, opgroup="cx3") + assert c.opgroups == {"cx0", "cx1", "cx2", "cx3"} c.substitute_named(Circuit(2), "cx3") assert c.n_gates == 4 assert c.n_gates_of_type(OpType.CX) == 3 + assert c.opgroups == {"cx0", "cx1", "cx2"} def test_phase_polybox() -> None: @@ -678,6 +706,26 @@ def test_phase_polybox() -> None: assert np.array_equal(p_box.linear_transformation, linear_transformation) +def test_phase_polybox_big() -> None: + c = Circuit(3, 3) + n_qb = 3 + qubit_indices = {Qubit(0): 0, Qubit(1): 1, Qubit(2): 2} + phase_polynomial = { + (True, False, True): 0.333, + (False, False, True): 0.05, + (False, False, False): 1.05, + } + linear_transformation = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + p_box = PhasePolyBox(n_qb, qubit_indices, 
phase_polynomial, linear_transformation) + + c.add_phasepolybox(p_box, [0, 1, 2]) + assert p_box.n_qubits == n_qb + assert p_box.qubit_indices == qubit_indices + assert p_box.phase_polynomial == phase_polynomial + assert np.array_equal(p_box.linear_transformation, linear_transformation) + assert DecomposeBoxes().apply(c) + + def test_depth() -> None: c = Circuit(3) c.H(0).H(1).CX(1, 2).CZ(0, 1).H(1).CZ(1, 2) @@ -717,6 +765,45 @@ def test_clifford_checking() -> None: assert m.is_clifford_type() == False +def test_getting_registers() -> None: + c = Circuit(2, 1) + c_regs = c.c_registers + assert len(c_regs) == 1 + assert c_regs[0] == BitRegister("c", 1) + q_regs = c.q_registers + assert len(q_regs) == 1 + assert q_regs[0] == QubitRegister("q", 2) + q_err_msg = "Cannot find quantum register with name" + c_err_msg = "Cannot find classical register with name" + with pytest.raises(RuntimeError) as e: + c.get_c_register("q") + assert c_err_msg in str(e.value) + with pytest.raises(RuntimeError) as e: + c.get_q_register("c") + assert q_err_msg in str(e.value) + assert c.get_c_register("c").name == "c" + assert c.get_c_register("c").size == 1 + assert c.get_q_register("q").name == "q" + assert c.get_q_register("q").size == 2 + c.add_q_register("test_qr", 10) + c.add_c_register("test_cr", 8) + assert c.get_c_register("test_cr").name == "test_cr" + assert c.get_c_register("test_cr").size == 8 + assert c.get_q_register("test_qr").name == "test_qr" + assert c.get_q_register("test_qr").size == 10 + + c_regs = c.c_registers + c_regs.sort() + assert len(c_regs) == 2 + assert c_regs[0] == BitRegister("c", 1) + assert c_regs[1] == BitRegister("test_cr", 8) + q_regs = c.q_registers + q_regs.sort() + assert len(q_regs) == 2 + assert q_regs[0] == QubitRegister("q", 2) + assert q_regs[1] == QubitRegister("test_qr", 10) + + if __name__ == "__main__": test_circuit_gen() test_symbolic_ops() diff --git a/pytket/tests/cli-test b/pytket/tests/cli-test deleted file mode 100755 index e494bad90c..0000000000 --- a/pytket/tests/cli-test +++ /dev/null @@ -1,89 +0,0 @@ -#! /usr/bin/env python3 - -# Copyright 2019-2022 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import subprocess - -exe = os.path.join(os.curdir, "tket") - -if __name__ == "__main__": - process = subprocess.Popen([exe], stdout=subprocess.PIPE, universal_newlines=True) - out, _ = process.communicate() - assert process.returncode == 0 - - process = subprocess.Popen( - [exe, "--list-passes"], stdout=subprocess.PIPE, universal_newlines=True - ) - out, _ = process.communicate() - assert process.returncode == 0 - assert "Available passes:" in out - - qasmfile = os.path.join(os.curdir, "tests", "qasm_test_files", "test1.qasm") - - latexfile = "test1.latex" - process = subprocess.Popen( - [ - exe, - "--infile", - qasmfile, - "--informat", - "qasm", - "--outfile", - latexfile, - "--outformat", - "latex", - ], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - out, _ = process.communicate() - assert process.returncode == 0 - with open(latexfile) as f: - latex = f.read() - assert latex.startswith("\\documentclass[tikz]{standalone}") - os.remove(latexfile) - - # REQUIRES PYTKET_QISKIT - # process = subprocess.Popen( - # [ - # exe, - # "--infile", - # qasmfile, - # "--informat", - # "qasm", - # "--backend", - # "AerStateBackend", - # "--passname", - # "default", - # "--run", - # "--show-state", - # ], - # stdout=subprocess.PIPE, - # universal_newlines=True, - # ) - # out, _ = process.communicate() - # assert process.returncode == 0 - # assert "[-1.673" in out - - process = subprocess.Popen( - [exe, "--wibble"], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True, - ) - out, _ = process.communicate() - assert process.returncode != 0 - assert "usage:" in out diff --git a/pytket/tests/mapping_test.py b/pytket/tests/mapping_test.py new file mode 100644 index 0000000000..b64372fef4 --- /dev/null +++ b/pytket/tests/mapping_test.py @@ -0,0 +1,434 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from pytket.mapping import (  # type: ignore
+    MappingManager,
+    RoutingMethodCircuit,
+    LexiRouteRoutingMethod,
+    AASRouteRoutingMethod,
+    LexiLabellingMethod,
+    AASLabellingMethod,
+    MultiGateReorderRoutingMethod,
+    BoxDecompositionRoutingMethod,
+)
+from pytket.architecture import Architecture  # type: ignore
+from pytket import Circuit, OpType
+from pytket.circuit import Node, PhasePolyBox, Qubit, CircBox  # type: ignore
+from pytket.placement import Placement  # type: ignore
+from typing import Tuple, Dict
+import numpy as np
+
+# simple deterministic heuristic used for testing purposes
+def route_subcircuit_func(
+    circuit: Circuit, architecture: Architecture
+) -> Tuple[bool, Circuit, Dict[Node, Node], Dict[Node, Node]]:
+    # make a replacement circuit with identical units
+    replacement_circuit = Circuit()
+    for qb in circuit.qubits:
+        replacement_circuit.add_qubit(qb)
+    for bit in circuit.bits:
+        replacement_circuit.add_bit(bit)
+
+    # "place" unassigned logical qubits onto physical qubits
+    unused_nodes = list(architecture.nodes)
+    relabelling_map = dict()
+
+    for qb in circuit.qubits:
+        if qb in unused_nodes:
+            unused_nodes.remove(qb)
+
+    for qb in circuit.qubits:
+        if qb not in architecture.nodes:
+            relabelling_map[qb] = unused_nodes.pop()
+        else:
+            # this is so later architecture.get_distance works
+            # obviously inefficient, but it's a simple test heuristic
+            relabelling_map[qb] = qb
+
+    replacement_circuit.rename_units(relabelling_map)
+    permutation_map = dict()
+    for qb in replacement_circuit.qubits:
+        permutation_map[qb] = qb
+
+    # very simple heuristic -> the first time a physically invalid CX is encountered, add a SWAP
+    # then add all remaining gates as is (using the updated physical mapping)
+    # note this is possible as routing accepts partially solved problems
+    max_swaps = 1
+    swaps_added = 0
+    for com in circuit.get_commands():
+        rp_qubits = [permutation_map[relabelling_map[q]] for q in com.qubits]
+        if len(com.qubits) > 2:
+            return (False, Circuit(), {}, {})
+        if len(com.qubits) == 1:
+            replacement_circuit.add_gate(com.op.type, rp_qubits)
+        if len(com.qubits) == 2:
+            if swaps_added < max_swaps:
+                for n in architecture.nodes:
+                    if n == rp_qubits[0]:
+                        n0 = n
+                    if n == rp_qubits[1]:
+                        n1 = n
+                distance = architecture.get_distance(n0, n1)
+                if distance > 1:
+                    for node in architecture.get_adjacent_nodes(n0):
+                        if architecture.get_distance(
+                            node, n1
+                        ) < architecture.get_distance(n0, n1):
+                            replacement_circuit.add_gate(
+                                OpType.SWAP, [rp_qubits[0], node]
+                            )
+
+                            permutation_map[rp_qubits[0]] = node
+                            permutation_map[node] = rp_qubits[0]
+                            rp_qubits = [
+                                permutation_map[relabelling_map[q]] for q in com.qubits
+                            ]
+                            swaps_added += 1
+                            break
+
+            replacement_circuit.add_gate(com.op.type, rp_qubits)
+
+    return (True, replacement_circuit, relabelling_map, permutation_map)
+
+
+def route_subcircuit_func_false(
+    circuit: Circuit, architecture: Architecture
+) -> Tuple[bool, Circuit, Dict[Node, Node], Dict[Node, Node]]:
+    return (False, Circuit(), {}, {})
+
+
+def test_LexiRouteRoutingMethod() -> None:
+    test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2)
+    nodes = [Node("test", 0), Node("test", 1), Node("test", 2)]
+    test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]])
+    test_mm = MappingManager(test_a)
+    test_mm.route_circuit(test_c, [LexiLabellingMethod(), LexiRouteRoutingMethod()])
+    routed_commands = test_c.get_commands()
+
+    assert routed_commands[0].op.type == OpType.CX
+    assert routed_commands[0].qubits == [nodes[1], nodes[0]]
+    assert routed_commands[1].op.type == OpType.CX
+    assert routed_commands[1].qubits == [nodes[1], nodes[2]]
+    assert routed_commands[2].op.type == OpType.SWAP
+    assert routed_commands[2].qubits == [nodes[2], nodes[1]]
+    assert routed_commands[3].op.type == OpType.CX
+    assert routed_commands[3].qubits == [nodes[0], nodes[1]]
+
+
+def test_AASRouteRoutingMethod() -> None:
+    test_c = Circuit(3, 3)
+    n_qb = 3
+    qubit_indices = {Qubit(0): 0, Qubit(1): 1, Qubit(2): 2}
+    phase_polynomial = {(True, False, True): 0.333, (False, False, True): 0.05}
+    linear_transformation = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+    p_box = PhasePolyBox(n_qb, qubit_indices, phase_polynomial, linear_transformation)
+
+    test_c.add_phasepolybox(p_box, [0, 1, 2])
+
+    test_c.CX(0, 1).CX(0, 2).CX(1, 2)
+    nodes = [Node("test", 0), Node("test", 1), Node("test", 2)]
+    test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]])
+    test_mm = MappingManager(test_a)
+    test_mm.route_circuit(
+        test_c,
+        [
+            AASRouteRoutingMethod(1),
+            LexiLabellingMethod(),
+            LexiRouteRoutingMethod(),
+            AASLabellingMethod(),
+        ],
+    )
+
+
+def test_AASRouteRoutingMethod_2() -> None:
+    test_c = Circuit(3, 3)
+    n_qb = 3
+    qubit_indices = {Qubit(0): 0, Qubit(1): 1, Qubit(2): 2}
+    phase_polynomial = {(True, False, False): 0.333}
+    linear_transformation = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+    p_box = PhasePolyBox(n_qb, qubit_indices, phase_polynomial, linear_transformation)
+
+    test_c.add_phasepolybox(p_box, [0, 1, 2])
+
+    nodes = [Node("test", 0), Node("test", 1), Node("test", 2)]
+    test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]])
+    test_mm = MappingManager(test_a)
+    test_mm.route_circuit(
+        test_c,
+        [
+            AASRouteRoutingMethod(1),
+            LexiLabellingMethod(),
+            LexiRouteRoutingMethod(),
+            AASLabellingMethod(),
+        ],
+    )
+    routed_commands = test_c.get_commands()
+
+    assert routed_commands[0].op.type == OpType.Rz
+    assert routed_commands[0].qubits == [nodes[0]]
+    assert len(routed_commands) == 1
+
+
+def test_AASRouteRoutingMethod_3() -> None:
+    test_c = Circuit(3, 3)
+    n_qb = 3
+    qubit_indices = {Qubit(0): 0, Qubit(1): 1, Qubit(2): 2}
+    phase_polynomial = {(True, True, False): 0.333}
+    linear_transformation = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+    p_box = PhasePolyBox(n_qb, qubit_indices, phase_polynomial, linear_transformation)
+
+    test_c.add_phasepolybox(p_box, [0, 1, 2])
+
+    nodes = [Node("test", 0), Node("test", 1), Node("test", 2)]
+    test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]])
+    test_mm = MappingManager(test_a)
+    test_mm.route_circuit(
+        test_c,
+        [
+            AASRouteRoutingMethod(1),
+            AASLabellingMethod(),
+        ],
+    )
+    routed_commands = test_c.get_commands()
+
+    assert routed_commands[0].op.type == OpType.CX
+    assert routed_commands[0].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[1].op.type == OpType.Rz
+    assert routed_commands[1].qubits == [nodes[1]]
+    assert routed_commands[2].op.type == OpType.CX
+    assert routed_commands[2].qubits == [nodes[0], nodes[1]]
+    assert len(routed_commands) == 3
+
+
+def test_AASRouteRoutingMethod_4() -> None:
+    test_c = Circuit(3, 3)
+    n_qb = 3
+    qubit_indices = {Qubit(0): 0, Qubit(1): 1, Qubit(2): 2}
+    phase_polynomial = {(True, True, False): 0.333}
+    linear_transformation = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+    p_box = PhasePolyBox(n_qb, qubit_indices, phase_polynomial, linear_transformation)
+
+    test_c.add_phasepolybox(p_box, [0, 1, 2])
+    test_c.CX(0, 1)
+
+    nodes = [Node("test", 0), Node("test", 1), Node("test", 2)]
+    test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]])
+    test_mm = MappingManager(test_a)
+    test_mm.route_circuit(
+        test_c,
+        [
+            AASRouteRoutingMethod(1),
+            LexiLabellingMethod(),
+            LexiRouteRoutingMethod(),
+            AASLabellingMethod(),
+        ],
+    )
+    routed_commands = test_c.get_commands()
+
+    assert routed_commands[0].op.type == OpType.CX
+    assert routed_commands[0].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[1].op.type == OpType.Rz
+    assert routed_commands[1].qubits == [nodes[1]]
+    assert routed_commands[2].op.type == OpType.CX
+    assert routed_commands[2].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[3].op.type == OpType.CX
+    assert routed_commands[3].qubits == [nodes[0], nodes[1]]
+    assert len(routed_commands) == 4
+
+
+def test_RoutingMethodCircuit_custom() -> None:
+    test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2)
+    nodes = [Node("test", 0), Node("test", 1), Node("test", 2)]
+    test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]])
+
+    test_mm = MappingManager(test_a)
+    test_mm.route_circuit(
+        test_c,
+        [RoutingMethodCircuit(route_subcircuit_func, 5, 5)],
+    )
+    routed_commands = test_c.get_commands()
+
+    assert routed_commands[0].op.type == OpType.CX
+    assert routed_commands[0].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[1].op.type == OpType.SWAP
+    assert routed_commands[1].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[2].op.type == OpType.CX
+    assert routed_commands[2].qubits == [nodes[1], nodes[2]]
+    assert routed_commands[3].op.type == OpType.SWAP
+    assert routed_commands[3].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[4].op.type == OpType.CX
+    assert routed_commands[4].qubits == [nodes[1], nodes[2]]
+
+
+def test_RoutingMethodCircuit_custom_list() -> None:
+    test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2)
+    nodes = [Node("test", 0), Node("test", 1), Node("test", 2)]
+    test_a = Architecture([[nodes[0], nodes[1]], [nodes[1], nodes[2]]])
+
+    test_mm = MappingManager(test_a)
+    test_mm.route_circuit(
+        test_c,
+        [
+            RoutingMethodCircuit(route_subcircuit_func_false, 5, 5),
+            LexiLabellingMethod(),
+            LexiRouteRoutingMethod(),
+        ],
+    )
+    routed_commands = test_c.get_commands()
+    assert routed_commands[0].op.type == OpType.CX
+    assert routed_commands[0].qubits == [nodes[1], nodes[0]]
+    assert routed_commands[1].op.type == OpType.CX
+    assert routed_commands[1].qubits == [nodes[1], nodes[2]]
+    assert routed_commands[2].op.type == OpType.SWAP
+    assert routed_commands[2].qubits == [nodes[2], nodes[1]]
+    assert routed_commands[3].op.type == OpType.CX
+    assert routed_commands[3].qubits == [nodes[0], nodes[1]]
+
+    test_c = Circuit(3).CX(0, 1).CX(0, 2).CX(1, 2)
+    test_mm.route_circuit(
+        test_c,
+        [
+            RoutingMethodCircuit(route_subcircuit_func, 5, 5),
+            LexiLabellingMethod(),
+            LexiRouteRoutingMethod(),
+        ],
+    )
+    routed_commands = test_c.get_commands()
+    assert routed_commands[0].op.type == OpType.CX
+    assert routed_commands[0].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[1].op.type == OpType.SWAP
+    assert routed_commands[1].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[2].op.type == OpType.CX
+    assert routed_commands[2].qubits == [nodes[1], nodes[2]]
+    assert routed_commands[3].op.type == OpType.SWAP
+    assert routed_commands[3].qubits == [nodes[0], nodes[1]]
+    assert routed_commands[4].op.type == OpType.CX
+    assert routed_commands[4].qubits == [nodes[1], nodes[2]]
+
+
+def test_basic_mapping() -> None:
+    circ = Circuit(5)
+    arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
+    circ.CX(0, 1)
+    circ.CX(0, 3)
+    circ.CX(2, 4)
+    circ.CX(1, 4)
+    circ.CX(0, 4)
+
+    init_map = dict()
+    init_map[Qubit(0)] = Node(0)
+    init_map[Qubit(1)] = Node(1)
+    init_map[Qubit(2)] = Node(2)
+    init_map[Qubit(3)] = Node(3)
+    init_map[Qubit(4)] = Node(4)
+    pl = Placement(arc)
+    pl.place_with_map(circ, init_map)
+    MappingManager(arc).route_circuit(circ, [LexiRouteRoutingMethod(50)])
+    assert circ.valid_connectivity(arc, directed=False)
+    assert len(circ.get_commands()) == 10
+
+
+def test_MultiGateReorderRoutingMethod() -> None:
+    circ = Circuit(5)
+    arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
+    # Invalid operation
+    circ.CZ(0, 2)
+    # Valid operations that can all be commuted to the front
+    circ.CZ(0, 1)
+    circ.CZ(1, 2)
+    circ.CZ(3, 2)
+    circ.CX(3, 4)
+
+    init_map = dict()
+    init_map[Qubit(0)] = Node(0)
+    init_map[Qubit(1)] = Node(1)
+    init_map[Qubit(2)] = Node(2)
+    init_map[Qubit(3)] = Node(3)
+    init_map[Qubit(4)] = Node(4)
+    pl = Placement(arc)
+    pl.place_with_map(circ, init_map)
+    # LexiRouteRoutingMethod should insert exactly one SWAP to route the final CZ gate
+    MappingManager(arc).route_circuit(
+        circ, [MultiGateReorderRoutingMethod(10, 10), LexiRouteRoutingMethod(50)]
+    )
+    assert circ.valid_connectivity(arc, directed=False)
+    assert len(circ.get_commands()) == 6
+
+
+def test_MultiGateReorderRoutingMethod_with_LexiLabelling() -> None:
+    circ = Circuit(4)
+    arc = Architecture([[0, 1], [1, 2], [2, 3], [0, 3]])
+
+    # LexiLabellingMethod should label the circuit such that the following 4 ops are valid
+    circ.CX(0, 1)
+    circ.CX(1, 2)
+    circ.CX(2, 3)
+    circ.CX(0, 3)
+
+    # Invalid CV
+    circ.CV(0, 2)
+
+    # The next op should be commuted ahead of the preceding CV
+    circ.CZ(0, 1)
+
+    # LexiRouteRoutingMethod should insert exactly one SWAP to route the CV gate
+    MappingManager(arc).route_circuit(
+        circ,
+        [
+            LexiLabellingMethod(),
+            MultiGateReorderRoutingMethod(10, 10),
+            LexiRouteRoutingMethod(50),
+        ],
+    )
+    assert circ.valid_connectivity(arc, directed=False)
+    commands = circ.get_commands()
+    assert len(commands) == 7
+    assert commands[4].op.type == OpType.CZ
+    assert commands[5].op.type == OpType.SWAP
+
+
+def test_BoxDecompositionRoutingMethod() -> None:
+    circ = Circuit(5)
+    sub_circ = Circuit(5)
+    arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
+    # Invalid operations
+    sub_circ.CZ(0, 2)
+    sub_circ.CZ(1, 3)
+    circ_box = CircBox(sub_circ)
+    circ.add_circbox(circ_box, [0, 1, 2, 3, 4])
+    circ.CZ(1, 3)
+
+    init_map = dict()
+    init_map[Qubit(0)] = Node(0)
+    init_map[Qubit(1)] = Node(1)
+    init_map[Qubit(2)] = Node(2)
+    init_map[Qubit(3)] = Node(3)
+    init_map[Qubit(4)] = Node(4)
+    pl = Placement(arc)
+    pl.place_with_map(circ, init_map)
+    # LexiRouteRoutingMethod should insert exactly one SWAP
+    MappingManager(arc).route_circuit(
+        circ, [BoxDecompositionRoutingMethod(), LexiRouteRoutingMethod(50)]
+    )
+    assert circ.valid_connectivity(arc, directed=False)
+    assert len(circ.get_commands()) == 4
+
+
+if __name__ == "__main__":
+    test_LexiRouteRoutingMethod()
+    test_RoutingMethodCircuit_custom()
+    test_RoutingMethodCircuit_custom_list()
+    test_basic_mapping()
+    test_MultiGateReorderRoutingMethod()
+    test_BoxDecompositionRoutingMethod()
diff --git a/pytket/tests/mitigation_test.py b/pytket/tests/mitigation_test.py
index f448fc26a6..494b3993e1 100644
--- a/pytket/tests/mitigation_test.py
+++ b/pytket/tests/mitigation_test.py
@@ -16,8 +16,10 @@ import json
 
 from pytket.utils.spam import SpamCorrecter, compress_counts
-from pytket.circuit import Node, Circuit  # type: ignore
-from pytket.routing import Architecture, route  # type: ignore
+from pytket.circuit import Node, Circuit, Qubit  # type: ignore
+from pytket.mapping import MappingManager, LexiLabellingMethod, LexiRouteRoutingMethod  # type: ignore
+from pytket.architecture import Architecture  # type: ignore
+from pytket.placement import place_with_map  # type: ignore
 from pytket.passes import DelayMeasures  # type: ignore
 from typing import List, Dict, Counter, Tuple
 from pytket.utils.outcomearray import OutcomeArray
 
@@ -106,7 +108,11 @@ def test_spam_integration() -> None:
     assert spam.characterisation_matrices[1].shape == (2, 2)
 
     bellcc = Circuit(3, 3).H(0).CX(0, 2).measure_all()
-    rbell = route(bellcc, arc)
+    qmap = {Qubit(0): qbs[1], Qubit(1): qbs[2], Qubit(2): qbs[0]}
+    place_with_map(bellcc, qmap)
+    mm = MappingManager(arc)
+    rbell = bellcc.copy()
+    mm.route_circuit(rbell, [LexiLabellingMethod(), LexiRouteRoutingMethod()])
 
     def check_correction(
         counts0: Dict[Tuple[int, ...], int], counts1: Dict[Tuple[int, ...], int]
@@ -501,7 +507,9 @@ def test_spam_routing() -> None:
     arc = Architecture([[qbs[i], qbs[i + 1]] for i in range(8)] + [[qbs[0], qbs[4]]])
 
     testc = Circuit(4, 4).H(0).CX(0, 3).CX(1, 2).CX(0, 1).CX(3, 2).measure_all()
-    routed = route(testc, arc)
+    routed = testc.copy()
+    mm = MappingManager(arc)
+    mm.route_circuit(routed, [LexiLabellingMethod(), LexiRouteRoutingMethod()])
     DelayMeasures().apply(routed)
 
     readout = routed.qubit_readout
diff --git a/pytket/tests/placement_test.py b/pytket/tests/placement_test.py
new file mode 100644
index 0000000000..23149db5c9
--- /dev/null
+++ b/pytket/tests/placement_test.py
@@ -0,0 +1,251 @@
+# Copyright 2019-2022 Cambridge Quantum Computing
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from pathlib import Path +from pytket import Circuit # type: ignore +from pytket.circuit import Node, Qubit # type: ignore +from pytket.architecture import Architecture # type: ignore +from pytket.placement import ( # type: ignore + Placement, + LinePlacement, + GraphPlacement, + NoiseAwarePlacement, + place_with_map, +) +from pytket.passes import PauliSimp, DefaultMappingPass # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod, LexiLabellingMethod # type: ignore +from pytket.qasm import circuit_from_qasm # type: ignore + +import json + + +def test_placements() -> None: + test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] + test_architecture = Architecture(test_coupling) + circ = Circuit(6) + for pair in test_coupling: + circ.CX(pair[0], pair[1]) + circ_qbs = circ.qubits + base_pl = Placement(test_architecture) + line_pl = LinePlacement(test_architecture) + graph_pl = GraphPlacement(test_architecture) + base_placed = circ.copy() + line_placed = circ.copy() + graph_placed = circ.copy() + + base_map = base_pl.get_placement_map(circ) + line_map = line_pl.get_placement_map(circ) + graph_map = graph_pl.get_placement_map(circ) + + assert base_map != line_map + assert base_map != graph_map + assert circ.qubits == circ_qbs + + base_pl.place(base_placed) + line_pl.place(line_placed) + graph_pl.place(graph_placed) + + assert line_placed.qubits[0] == line_map[circ_qbs[0]] + assert line_placed.qubits[1] == line_map[circ_qbs[1]] + assert line_placed.qubits[2] == line_map[circ_qbs[2]] + + assert base_placed.qubits[0] == base_map[circ_qbs[0]] + assert base_placed.qubits[1] == base_map[circ_qbs[1]] + assert base_placed.qubits[2] == base_map[circ_qbs[2]] + + assert graph_placed.qubits[0] == graph_map[circ_qbs[0]] + assert graph_placed.qubits[1] == graph_map[circ_qbs[1]] + assert graph_placed.qubits[2] == graph_map[circ_qbs[2]] + + assert circ_qbs != base_placed.qubits + assert circ_qbs != line_placed.qubits + assert circ_qbs != graph_placed.qubits + + mm = MappingManager(test_architecture) + mm.route_circuit(base_placed, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + mm.route_circuit(line_placed, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + mm.route_circuit(graph_placed, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + + assert base_placed.valid_connectivity(test_architecture, False) + assert line_placed.valid_connectivity(test_architecture, False) + assert graph_placed.valid_connectivity(test_architecture, False) + + +def test_placements_serialization() -> None: + with open( + Path(__file__).resolve().parent / "json_test_files" / "placements.json", "r" + ) as f: + dict = json.load(f) + base_pl_serial = dict["base_placement"] + line_pl_serial = dict["line_placement"] + graph_pl_serial = dict["graph_placement"] + noise_pl_serial = dict["noise_placement"] + + assert Placement.from_dict(base_pl_serial).to_dict() == base_pl_serial + assert LinePlacement.from_dict(line_pl_serial).to_dict() == line_pl_serial + assert GraphPlacement.from_dict(graph_pl_serial).to_dict() == graph_pl_serial + assert NoiseAwarePlacement.from_dict(noise_pl_serial).to_dict() == noise_pl_serial + + +def test_placement_config() -> None: + test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] + test_architecture = Architecture(test_coupling) + test_pl = GraphPlacement(test_architecture) + test_circuit = Circuit(6) + test_circuit.CX(0, 1) + test_circuit.CX(2, 3) + test_circuit.CX(4, 3) + test_circuit.CX(2, 4) + test_circuit.CX(3, 5) + test_circuit.CX(0, 5) + circ1 = 
test_circuit.copy() + circ2 = test_circuit.copy() + map1 = test_pl.get_placement_map(test_circuit) + test_pl.place(circ1) + test_pl.modify_config( + max_matches=1, depth_limit=0, max_interaction_edges=2, timeout=100 + ) + map2 = test_pl.get_placement_map(test_circuit) + test_pl.place(circ2) + assert map1 != map2 + + mm = MappingManager(test_architecture) + mm.route_circuit(circ1, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + mm.route_circuit(circ2, [LexiLabellingMethod(), LexiRouteRoutingMethod()]) + assert circ1.n_gates < circ2.n_gates + + +def test_convert_index_mapping() -> None: + test_circuit = Circuit(6) + test_circuit.CX(0, 1) + test_circuit.CX(2, 3) + test_circuit.CX(4, 3) + test_circuit.CX(2, 4) + test_circuit.CX(3, 5) + test_circuit.CX(0, 5) + + c0 = test_circuit.copy() + c1 = test_circuit.copy() + + index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} + uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} + circ_qbs = test_circuit.qubits + assert uid_map[circ_qbs[0]] == Node(1) + assert uid_map[circ_qbs[1]] == Node(2) + assert uid_map[circ_qbs[2]] == Node(0) + assert uid_map[circ_qbs[3]] == Node(4) + assert uid_map[circ_qbs[4]] == Node(3) + + place_with_map(test_circuit, uid_map) + + new_circ_qbs = test_circuit.qubits + assert circ_qbs != new_circ_qbs + assert new_circ_qbs[0] == Node(0) + assert new_circ_qbs[1] == Node(1) + assert new_circ_qbs[2] == Node(2) + assert new_circ_qbs[3] == Node(3) + assert new_circ_qbs[4] == Node(4) + assert new_circ_qbs[5] == Qubit("unplaced", 0) + + index_map_0 = {0: 5, 1: 4, 2: 0, 3: 1, 4: 3, 5: 2} + index_map_1 = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3, 5: 5} + uid_0 = {Qubit(i): Node(j) for i, j in index_map_0.items()} + uid_1 = {Qubit(i): Node(j) for i, j in index_map_1.items()} + assert uid_0 != uid_1 + + place_with_map(c0, uid_0) + place_with_map(c1, uid_1) + assert c0 != c1 + + +def test_place_with_map_twice() -> None: + # TKET-671 + c = Circuit(6).CX(0, 1).CX(2, 3).CX(4, 3).CX(2, 4).CX(3, 5).CX(0, 5) + + index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} + uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} + c_qbs = c.qubits + assert uid_map[c_qbs[0]] == Node(1) + assert uid_map[c_qbs[1]] == Node(2) + assert uid_map[c_qbs[2]] == Node(0) + assert uid_map[c_qbs[3]] == Node(4) + assert uid_map[c_qbs[4]] == Node(3) + + assert all(qb.reg_name == "q" for qb in c.qubits) + place_with_map(c, uid_map) + assert all(qb.reg_name in ["node", "unplaced"] for qb in c.qubits) + place_with_map(c, uid_map) + assert all(qb.reg_name == "unplaced" for qb in c.qubits) + + +def test_big_placement() -> None: + # TKET-1275 + c = circuit_from_qasm( + Path(__file__).resolve().parent / "qasm_test_files" / "test14.qasm" + ) + arc = Architecture( + [ + [0, 1], + [0, 14], + [1, 0], + [1, 2], + [1, 13], + [2, 1], + [2, 3], + [2, 12], + [3, 2], + [3, 4], + [3, 11], + [4, 3], + [4, 5], + [4, 10], + [5, 4], + [5, 6], + [5, 9], + [6, 5], + [6, 8], + [7, 8], + [8, 6], + [8, 7], + [8, 9], + [9, 5], + [9, 8], + [9, 10], + [10, 4], + [10, 9], + [10, 11], + [11, 3], + [11, 10], + [11, 12], + [12, 2], + [12, 11], + [12, 13], + [13, 1], + [13, 12], + [13, 14], + [14, 0], + [14, 13], + ] + ) + assert PauliSimp().apply(c) + assert DefaultMappingPass(arc).apply(c) + + +if __name__ == "__main__": + test_placements() + test_placements_serialization() + test_placement_config() + test_convert_index_mapping() + test_place_with_map_twice() + test_big_placement() diff --git a/pytket/tests/predicates_test.py b/pytket/tests/predicates_test.py index 85fcbad710..ee53dac1be 100644 --- 
a/pytket/tests/predicates_test.py +++ b/pytket/tests/predicates_test.py @@ -32,6 +32,7 @@ RoutingPass, CXMappingPass, PlacementPass, + NaivePlacementPass, RenameQubitsPass, FullMappingPass, DefaultMappingPass, @@ -45,16 +46,7 @@ DecomposeBoxes, PeepholeOptimise2Q, FullPeepholeOptimise, - RebaseCirq, - RebaseHQS, - RebaseProjectQ, - RebasePyZX, - RebaseQuil, RebaseTket, - RebaseUMD, - RebaseUFR, - RebaseOQC, - SquashHQS, FlattenRegisters, SquashCustom, DelayMeasures, @@ -66,6 +58,7 @@ SimplifyInitial, RemoveBarriers, PauliSquash, + auto_rebase_pass, ) from pytket.predicates import ( # type: ignore GateSetPredicate, @@ -74,7 +67,14 @@ CompilationUnit, UserDefinedPredicate, ) -from pytket.routing import Architecture, Placement, GraphPlacement # type: ignore +from pytket.mapping import ( # type: ignore + LexiLabellingMethod, + LexiRouteRoutingMethod, + MultiGateReorderRoutingMethod, + BoxDecompositionRoutingMethod, +) +from pytket.architecture import Architecture # type: ignore +from pytket.placement import Placement, GraphPlacement # type: ignore from pytket.transform import Transform, PauliSynthStrat, CXConfigType # type: ignore from pytket._tket.passes import SynthesiseOQC # type: ignore import numpy as np @@ -132,7 +132,7 @@ def test_rebase_pass_generation() -> None: cx = Circuit(2) cx.CX(0, 1) pz_rebase = RebaseCustom( - {OpType.CX}, cx, {OpType.PhasedX, OpType.Rz}, tk1_to_phasedxrz + {OpType.CX, OpType.PhasedX, OpType.Rz}, cx, tk1_to_phasedxrz ) circ = Circuit(2) circ.X(0).Y(1) @@ -200,19 +200,36 @@ def test_routing_and_placement_pass() -> None: pl = Placement(arc) routing = RoutingPass(arc) placement = PlacementPass(pl) + nplacement = NaivePlacementPass(arc) cu = CompilationUnit(circ.copy()) assert placement.apply(cu) assert routing.apply(cu) + assert nplacement.apply(cu) expected_map = {q[0]: n1, q[1]: n0, q[2]: n2, q[3]: n5, q[4]: n3} assert cu.initial_map == expected_map + cu1 = CompilationUnit(circ.copy()) + assert nplacement.apply(cu1) + arcnodes = arc.nodes + expected_nmap = { + q[0]: arcnodes[0], + q[1]: arcnodes[1], + q[2]: arcnodes[2], + q[3]: arcnodes[3], + q[4]: arcnodes[4], + } + assert cu1.initial_map == expected_nmap # check composition works ok - seq_pass = SequencePass([SynthesiseTket(), placement, routing, SynthesiseUMD()]) + seq_pass = SequencePass( + [SynthesiseTket(), placement, routing, nplacement, SynthesiseUMD()] + ) cu2 = CompilationUnit(circ.copy()) assert seq_pass.apply(cu2) assert cu2.initial_map == expected_map - full_pass = FullMappingPass(arc, pl) + full_pass = FullMappingPass( + arc, pl, config=[LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) cu3 = CompilationUnit(circ.copy()) assert full_pass.apply(cu3) assert cu3.initial_map == expected_map @@ -221,7 +238,7 @@ def test_routing_and_placement_pass() -> None: def test_default_mapping_pass() -> None: circ = Circuit() - q = circ.add_q_register("q", 5) + q = circ.add_q_register("q", 6) circ.CX(0, 1) circ.H(0) circ.Z(1) @@ -231,14 +248,17 @@ def test_default_mapping_pass() -> None: circ.X(2) circ.CX(1, 4) circ.CX(0, 4) + circ.H(5) n0 = Node("b", 0) n1 = Node("b", 1) n2 = Node("b", 2) n3 = Node("a", 0) n4 = Node("f", 0) - arc = Architecture([[n0, n1], [n1, n2], [n2, n3], [n3, n4]]) + n5 = Node("g", 7) + arc = Architecture([[n0, n1], [n1, n2], [n2, n3], [n3, n4], [n4, n5]]) pl = GraphPlacement(arc) + nplacement = NaivePlacementPass(arc) routing = RoutingPass(arc) placement = PlacementPass(pl) default = DefaultMappingPass(arc) @@ -247,6 +267,7 @@ def test_default_mapping_pass() -> None: assert 
placement.apply(cu_rp) assert routing.apply(cu_rp) + assert nplacement.apply(cu_rp) assert default.apply(cu_def) assert cu_rp.circuit == cu_def.circuit @@ -320,7 +341,7 @@ def test_RebaseOQC_and_SynthesiseOQC() -> None: u_before_oqc = circ3.get_unitary() assert np.allclose(u, u_before_oqc) - RebaseOQC().apply(circ3) + auto_rebase_pass(oqc_gateset).apply(circ3) assert oqc_gateset_pred.verify(circ3) u_before_rebase_tket = circ3.get_unitary() assert np.allclose(u, u_before_rebase_tket) @@ -562,14 +583,7 @@ def test_library_pass_config() -> None: FullPeepholeOptimise().to_dict()["StandardPass"]["name"] == "FullPeepholeOptimise" ) - assert RebaseCirq().to_dict()["StandardPass"]["name"] == "RebaseCirq" - assert RebaseHQS().to_dict()["StandardPass"]["name"] == "RebaseHQS" - assert RebaseProjectQ().to_dict()["StandardPass"]["name"] == "RebaseProjectQ" - assert RebasePyZX().to_dict()["StandardPass"]["name"] == "RebasePyZX" - assert RebaseQuil().to_dict()["StandardPass"]["name"] == "RebaseQuil" assert RebaseTket().to_dict()["StandardPass"]["name"] == "RebaseTket" - assert RebaseUMD().to_dict()["StandardPass"]["name"] == "RebaseUMD" - assert RebaseUFR().to_dict()["StandardPass"]["name"] == "RebaseUFR" assert ( RemoveRedundancies().to_dict()["StandardPass"]["name"] == "RemoveRedundancies" ) @@ -577,12 +591,6 @@ def test_library_pass_config() -> None: assert SynthesiseTket().to_dict()["StandardPass"]["name"] == "SynthesiseTket" assert SynthesiseOQC().to_dict()["StandardPass"]["name"] == "SynthesiseOQC" assert SynthesiseUMD().to_dict()["StandardPass"]["name"] == "SynthesiseUMD" - # Share name with SquashCustom - assert SquashHQS().to_dict()["StandardPass"]["name"] == "SquashCustom" - assert set(SquashHQS().to_dict()["StandardPass"]["basis_singleqs"]) == { - "Rz", - "PhasedX", - } assert FlattenRegisters().to_dict()["StandardPass"]["name"] == "FlattenRegisters" assert DelayMeasures().to_dict()["StandardPass"]["name"] == "DelayMeasures" @@ -622,14 +630,15 @@ def sq(a: float, b: float, c: float) -> Circuit: cx = Circuit(2) cx.CX(0, 1) pz_rebase = RebaseCustom( - {OpType.CX}, cx, {OpType.PhasedX, OpType.Rz}, tk1_to_phasedxrz + {OpType.CX, OpType.PhasedX, OpType.Rz}, cx, tk1_to_phasedxrz ) assert pz_rebase.to_dict()["StandardPass"]["name"] == "RebaseCustom" - assert pz_rebase.to_dict()["StandardPass"]["basis_multiqs"] == ["CX"] - assert set(pz_rebase.to_dict()["StandardPass"]["basis_singleqs"]) == { + assert set(pz_rebase.to_dict()["StandardPass"]["basis_allowed"]) == { + "CX", "PhasedX", "Rz", } + assert cx.to_dict() == pz_rebase.to_dict()["StandardPass"]["basis_cx_replacement"] # EulerAngleReduction euler_pass = EulerAngleReduction(OpType.Ry, OpType.Rx) @@ -638,12 +647,8 @@ def sq(a: float, b: float, c: float) -> Circuit: assert euler_pass.to_dict()["StandardPass"]["euler_p"] == "Rx" # RoutingPass arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - r_pass = RoutingPass(arc, swap_lookahead=10, bridge_interactions=10) + r_pass = RoutingPass(arc) assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" - assert r_pass.to_dict()["StandardPass"]["routing_config"]["depth_limit"] == 10 - assert ( - r_pass.to_dict()["StandardPass"]["routing_config"]["interactions_limit"] == 10 - ) assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) # PlacementPass placer = GraphPlacement(arc) @@ -651,6 +656,10 @@ def sq(a: float, b: float, c: float) -> Circuit: assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == 
"GraphPlacement" assert p_pass.to_dict()["StandardPass"]["placement"]["config"]["depth_limit"] == 5 + # NaivePlacementPass + np_pass = NaivePlacementPass(arc) + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" + assert check_arc_dict(arc, np_pass.to_dict()["StandardPass"]["architecture"]) # RenameQubitsPass qm = {Qubit("a", 0): Qubit("b", 1), Qubit("a", 1): Qubit("b", 0)} rn_pass = RenameQubitsPass(qm) @@ -659,21 +668,64 @@ def sq(a: float, b: float, c: float) -> Circuit: [k.to_list(), v.to_list()] for k, v in qm.items() ] # FullMappingPass - fm_pass = FullMappingPass(arc, placer) + fm_pass = FullMappingPass( + arc, + placer, + config=[ + LexiLabellingMethod(), + LexiRouteRoutingMethod(), + MultiGateReorderRoutingMethod(), + BoxDecompositionRoutingMethod(), + ], + ) assert fm_pass.to_dict()["pass_class"] == "SequencePass" p_pass = fm_pass.get_sequence()[0] r_pass = fm_pass.get_sequence()[1] - assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" + np_pass = fm_pass.get_sequence()[2] + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" + assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement" + assert r_pass.to_dict()["StandardPass"]["routing_config"] == [ + {"name": "LexiLabellingMethod"}, + { + "name": "LexiRouteRoutingMethod", + "depth": 10, + }, + { + "name": "MultiGateReorderRoutingMethod", + "depth": 10, + "size": 10, + }, + {"name": "BoxDecompositionRoutingMethod"}, + ] + assert r_pass.to_dict()["StandardPass"]["routing_config"][3] == { + "name": "BoxDecompositionRoutingMethod" + } # DefaultMappingPass dm_pass = DefaultMappingPass(arc) assert dm_pass.to_dict()["pass_class"] == "SequencePass" + p_pass = dm_pass.get_sequence()[0].get_sequence()[0] + r_pass = dm_pass.get_sequence()[0].get_sequence()[1] + np_pass = dm_pass.get_sequence()[0].get_sequence()[2] + d_pass = dm_pass.get_sequence()[1] + assert d_pass.to_dict()["StandardPass"]["name"] == "DelayMeasures" + assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" + assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" + assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) + assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement" + # DefaultMappingPass with delay_measures=False + dm_pass = DefaultMappingPass(arc, False) + assert dm_pass.to_dict()["pass_class"] == "SequencePass" + assert len(dm_pass.get_sequence()) == 3 p_pass = dm_pass.get_sequence()[0] r_pass = dm_pass.get_sequence()[1] + np_pass = dm_pass.get_sequence()[2] assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass" assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass" + assert np_pass.to_dict()["StandardPass"]["name"] == "NaivePlacementPass" assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"]) assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement" # AASRouting diff --git a/pytket/tests/program_test.py b/pytket/tests/program_test.py deleted file mode 100644 index 259cf06252..0000000000 --- a/pytket/tests/program_test.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2019-2022 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pytket import OpType, Qubit, Bit -from pytket.program import Program # type: ignore - -import pytest # type: ignore - - -def test_straight_line_gen() -> None: - p = Program(2, 2) - p.add_gate(OpType.X, [0]) - p.add_gate(OpType.Y, [1], condition_bits=[0, 1], condition_value=3) - p.add_gate(OpType.Measure, [0, 0]) - - commands = p.get_commands() - assert len(commands) == 4 - assert str(commands[0]) == "X q[0];" - assert str(commands[1]) == "IF ([c[0], c[1]] == 3) THEN Y q[1];" - assert str(commands[2]) == "Measure q[0] --> c[0];" - assert str(commands[3]) == "Stop;" - - -def test_append() -> None: - p = Program() - qr = p.add_q_register("qr", 3) - cr = p.add_c_register("cr", 2) - p.add_gate(OpType.X, [], [qr[0]]) - p.add_gate(OpType.Measure, [], [qr[2], cr[0]]) - assert len(p.qubits) == 3 - assert len(p.bits) == 2 - - p2 = Program() - anc = Qubit("anc") - err = Bit("err") - p2.add_qubit(anc) - assert len(p2.qubits) == 1 - p2.add_bit(err) - assert len(p2.bits) == 1 - - p2.add_gate(OpType.Measure, [], [anc, err]) - p.append(p2) - commands = p.get_commands() - assert len(commands) == 4 - assert str(commands[2]) == "Measure anc --> err;" - - bmap = p.bit_readout - assert len(bmap) == 3 - assert bmap[err] == 2 - qmap = p.qubit_readout - assert len(qmap) == 1 # Only the anc-->err measure is in the last block - assert qmap[anc] == 2 - - -def test_append_if() -> None: - p = Program(2, 2) - p.add_gate(OpType.X, [0]) - - p2 = Program(2) - p2.add_gate(OpType.CX, [0, 1]) - - p.append_if(Bit(0), p2) - commands = p.get_commands() - assert len(commands) == 7 - assert str(commands[0]) == "X q[0];" - assert str(commands[1]) == "Branch lab_0 c[0];" - assert str(commands[2]) == "Goto lab_1;" - assert str(commands[3]) == "Label lab_0;" - assert str(commands[4]) == "CX q[0], q[1];" - assert str(commands[5]) == "Label lab_1;" - assert str(commands[6]) == "Stop;" - - -def test_append_if_else() -> None: - p = Program(2, 2) - p.add_gate(OpType.X, [0]) - - p2 = Program(2) - p2.add_gate(OpType.CX, [0, 1]) - - p3 = Program(2) - p3.add_gate(OpType.CZ, [1, 0]) - - p.append_if_else(Bit(0), p2, p3) - commands = p.get_commands() - assert len(commands) == 8 - assert str(commands[0]) == "X q[0];" - assert str(commands[1]) == "Branch lab_0 c[0];" - assert str(commands[2]) == "CZ q[1], q[0];" - assert str(commands[3]) == "Goto lab_1;" - assert str(commands[4]) == "Label lab_0;" - assert str(commands[5]) == "CX q[0], q[1];" - assert str(commands[6]) == "Label lab_1;" - assert str(commands[7]) == "Stop;" - - -def test_append_while() -> None: - p = Program(2, 2) - p.add_gate(OpType.X, [0]) - - p2 = Program(2) - p2.add_gate(OpType.CX, [0, 1]) - - p.append_while(Bit(0), p2) - commands = p.get_commands() - assert len(commands) == 9 - assert str(commands[0]) == "X q[0];" - assert str(commands[1]) == "Label lab_0;" - assert str(commands[2]) == "Branch lab_1 c[0];" - assert str(commands[3]) == "Goto lab_2;" - assert str(commands[4]) == "Label lab_1;" - assert str(commands[5]) == "CX q[0], q[1];" - assert str(commands[6]) == "Goto lab_0;" - 
assert str(commands[7]) == "Label lab_2;" - assert str(commands[8]) == "Stop;" - - -if __name__ == "__main__": - test_straight_line_gen() - test_append() - test_append_if() - test_append_if_else() - test_append_while() diff --git a/pytket/tests/pytket_config_test.py b/pytket/tests/pytket_config_test.py index b59d959c32..b9b5f30482 100644 --- a/pytket/tests/pytket_config_test.py +++ b/pytket/tests/pytket_config_test.py @@ -58,11 +58,6 @@ def test_pytket_config() -> None: config = load_config_file() - assert isinstance(config.enable_telemetry, bool) - if config.enable_telemetry: - assert isinstance(config.telemetry_id, str) - else: - assert config.telemetry_id is None assert isinstance(config.extensions, dict) diff --git a/pytket/tests/qasm_test.py b/pytket/tests/qasm_test.py index 891b567c33..e6ee8ddb5c 100644 --- a/pytket/tests/qasm_test.py +++ b/pytket/tests/qasm_test.py @@ -48,8 +48,8 @@ def test_qasm_correct() -> None: assert c.n_qubits == 4 assert c.depth() == 8 coms = c.get_commands() - assert len(coms) == 11 - correct_str = "[Rz(1.5) q[3];, Rx(0.0375) q[3];, Rz(0.5) q[3];, CX q[0], q[3];, CZ q[0], q[1];, Rz(1.5) q[3];, Rx(1.9625) q[3];, CCX q[3], q[1], q[2];, Barrier q[0], q[3], q[2];, CU1(0.8) q[0], q[1];, U3(1, 0.5, 0.3) q[2];]" + assert len(coms) == 13 + correct_str = "[XXPhase(0.0375) q[0], q[1];, Rz(1.5) q[3];, ZZPhase(0.0375) q[0], q[1];, Rx(0.0375) q[3];, Rz(0.5) q[3];, CX q[0], q[3];, CZ q[0], q[1];, Rz(1.5) q[3];, Rx(1.9625) q[3];, CCX q[3], q[1], q[2];, Barrier q[0], q[3], q[2];, CU1(0.8) q[0], q[1];, U3(1, 0.5, 0.3) q[2];]" assert str(coms) == correct_str # TKET-871 fname2 = str(curr_file_path / "qasm_test_files/test9.qasm") @@ -343,6 +343,13 @@ def test_new_qelib1_aliases() -> None: assert "U3(0, 0, 0) q[0]" in commands_str +def test_h1_rzz() -> None: + c = Circuit(2) + c.add_gate(OpType.ZZPhase, [0.1], [0, 1]) + assert "rzz" in circuit_to_qasm_str(c, header="qelib1") + assert "RZZ" in circuit_to_qasm_str(c, header="hqslib1") + + if __name__ == "__main__": test_qasm_correct() test_qasm_qubit() diff --git a/pytket/tests/qasm_test_files/test1.qasm b/pytket/tests/qasm_test_files/test1.qasm index 154da5f9b6..6bb78b7a35 100644 --- a/pytket/tests/qasm_test_files/test1.qasm +++ b/pytket/tests/qasm_test_files/test1.qasm @@ -4,7 +4,9 @@ include "qelib1.inc"; qreg q[4]; rz(1.5*pi) q[3]; rx(0.0375*pi) q[3]; +rxx(0.0375*pi) q[0],q[1]; rz(0.5*pi) q[3]; +rzz(0.0375*pi) q[0],q[1]; cx q[0],q[3]; rz(1.5*pi) q[3]; rx(1.9625*pi) q[3]; @@ -12,4 +14,4 @@ cz q[0] ,q[1]; //hey look ma its a cz ccx q[3],q[1],q[2]; barrier q[0],q[3],q[2]; u3(3.141596, 0.5* pi ,0.3*pi) q[2]; -cu1(0.8*pi) q[0],q[1]; \ No newline at end of file +cu1(0.8*pi) q[0],q[1]; diff --git a/pytket/tests/qubitpaulioperator_test.py b/pytket/tests/qubitpaulioperator_test.py index 5b7ee5313d..e96c5cea8e 100644 --- a/pytket/tests/qubitpaulioperator_test.py +++ b/pytket/tests/qubitpaulioperator_test.py @@ -27,16 +27,9 @@ import strategies as st # type: ignore -try: - from openfermion import QubitOperator # type: ignore - - _of_installed = True -except ImportError: - _of_installed = False - def test_QubitPauliOperator_addition() -> None: - x = Symbol("x") + x = Symbol("x") # type: ignore qpo = QubitPauliOperator() qpo += QubitPauliOperator( { @@ -56,9 +49,9 @@ def test_QubitPauliOperator_addition() -> None: def test_QubitPauliOperator_scalarmult() -> None: - y = Symbol("y") + y = Symbol("y") # type: ignore qpo = QubitPauliOperator({QubitPauliString(Qubit("q"), Pauli.X): y}) - x = Symbol("x") + x = Symbol("x") # type: ignore qpo2 = x 
* qpo qpo3 = qpo * x assert qpo2 == qpo3 @@ -68,9 +61,9 @@ def test_QubitPauliOperator_scalarmult() -> None: def test_QubitPauliOperator_opmult() -> None: - y = Symbol("y") + y = Symbol("y") # type: ignore qpo = QubitPauliOperator({QubitPauliString(Qubit(0), Pauli.Z): y}) - x = Symbol("x") + x = Symbol("x") # type: ignore qpo2 = QubitPauliOperator({QubitPauliString(Qubit(0), Pauli.X): x}) qpo3 = qpo * qpo2 # order matters! assert qpo3._dict[QubitPauliString(Qubit(0), Pauli.Y)] == 1j * x * y @@ -80,7 +73,7 @@ def test_QubitPauliOperator_opmult() -> None: def test_QubitPauliOperator_substitution() -> None: qps = QubitPauliString(Qubit(0), Pauli.X) - e = Symbol("e") + e = Symbol("e") # type: ignore exp = e + 5 qpo = QubitPauliOperator({qps: exp}) qpo.subs({e: 1}) @@ -98,7 +91,7 @@ def test_QubitPauliOperator_io() -> None: qps1 = pickle.loads(string_data) assert qps0 == qps1 qps2 = QubitPauliString(Qubit(2), Pauli.Z) - a = Symbol("a") + a = Symbol("a") # type: ignore op = QubitPauliOperator({qps1: a, qps2: 3.1}) op_data = pickle.dumps(op) op2 = pickle.loads(op_data) @@ -107,29 +100,6 @@ def test_QubitPauliOperator_io() -> None: assert np.isclose(complex(op2[qps2]), 3.1) -@pytest.mark.skipif(not _of_installed, reason="openfermion not installed") -def test_QubitPauliOperator_conversion() -> None: - openf_op = QubitOperator("X0 X1 Y2 Z3", 0.34) - openf_op += QubitOperator("Z0 X1 Y2 Z3", -0.1j) - - tk_op = QubitPauliOperator.from_OpenFermion(openf_op) - assert len(tk_op._dict) == 2 - - qbs = [Qubit(i) for i in range(4)] - qps1 = QubitPauliString(qbs, [Pauli.X, Pauli.X, Pauli.Y, Pauli.Z]) - qps2 = QubitPauliString(qbs, [Pauli.Z, Pauli.X, Pauli.Y, Pauli.Z]) - assert np.isclose(complex(tk_op[qps1]), 0.34) - assert np.isclose(complex(tk_op[qps2]), -0.1j) - - openf_op2 = tk_op.to_OpenFermion() - assert openf_op == openf_op2 - - tk_op[QubitPauliString(Qubit(0), Pauli.Z)] = Symbol("x") - with pytest.raises(ValueError) as errorinfo: - fail_openf_op = tk_op.to_OpenFermion() - assert "QubitPauliOperator contains unevaluated symbols." 
in str(errorinfo.value) - - def test_QubitPauliOperator_matrices() -> None: qbs = [Qubit(i) for i in range(2)] qpsXY = QubitPauliString(qbs, [Pauli.X, Pauli.Y]) @@ -171,7 +141,7 @@ def test_QubitPauliOperator_compression() -> None: qpsXY = QubitPauliString(qbs, [Pauli.X, Pauli.Y]) qpsZI = QubitPauliString(qbs, [Pauli.Z, Pauli.I]) qpsYY = QubitPauliString(qbs, [Pauli.Y, Pauli.Y]) - x = Symbol("x") + x = Symbol("x") # type: ignore op = QubitPauliOperator({qpsXY: 2, qpsZI: 1e-11j * x, qpsYY: 1e-11 * x + 1j}) op.compress() with pytest.raises(KeyError) as errorinfo: @@ -180,8 +150,8 @@ def test_QubitPauliOperator_compression() -> None: assert op[qpsXY] == 2 assert re(op[qpsYY]) == 0 assert im(op[qpsYY]) - assert op[qpsYY].subs({x: 0.001}) == 1j - assert op[qpsYY].subs({x: 10}) == 1j + assert op[qpsYY].subs({x: 0.001}).equals(1.0j) + assert op[qpsYY].subs({x: 10}).equals(1.0j) if __name__ == "__main__": @@ -190,18 +160,20 @@ def test_QubitPauliOperator_compression() -> None: test_QubitPauliOperator_opmult() test_QubitPauliOperator_substitution() test_QubitPauliOperator_io() - test_QubitPauliOperator_conversion() test_QubitPauliOperator_matrices() test_QubitPauliOperator_compression() def test_QubitPauliString_serialization() -> None: - qps = QubitPauliString( + qps0 = QubitPauliString() + qps1 = QubitPauliString( [Qubit(i) for i in range(4)], [Pauli.Y, Pauli.I, Pauli.X, Pauli.Z] ) - serializable = qps.to_list() - assert QubitPauliString.from_list(serializable) == qps - assert json.loads(json.dumps(serializable)) == serializable + assert qps0.to_list() == [] + for qps in [qps0, qps1]: + serializable = qps.to_list() + assert QubitPauliString.from_list(serializable) == qps + assert json.loads(json.dumps(serializable)) == serializable def test_QubitPauliOperator_serialization() -> None: diff --git a/pytket/tests/requirements-openfermion.txt b/pytket/tests/requirements-openfermion.txt deleted file mode 100644 index c1826a9ecb..0000000000 --- a/pytket/tests/requirements-openfermion.txt +++ /dev/null @@ -1 +0,0 @@ -openfermion ~= 1.1 diff --git a/pytket/tests/routing_test.py b/pytket/tests/routing_test.py deleted file mode 100644 index 0374110853..0000000000 --- a/pytket/tests/routing_test.py +++ /dev/null @@ -1,876 +0,0 @@ -# Copyright 2019-2022 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pathlib import Path -from pytket.circuit import OpType, Qubit, Node, Circuit # type: ignore -from pytket.routing import ( # type: ignore - NodeGraph, - Architecture, - LinePlacement, - GraphPlacement, - NoiseAwarePlacement, - Placement, - SquareGrid, - FullyConnected, - place_with_map, - route, -) -from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore -from pytket.passes import ( # type: ignore - DefaultMappingPass, - FullMappingPass, - RoutingPass, - PlacementPass, - CXMappingPass, - AASRouting, - PauliSimp, - CNotSynthType, -) -from pytket.qasm import circuit_from_qasm -from pytket.transform import Transform # type: ignore -import numpy as np -import pytest # type: ignore - -import json - - -def test_architectures() -> None: - basic_index_coupling = [(0, 1), (2, 1), (2, 3), (4, 3)] - basic_index_architecture = Architecture(basic_index_coupling) - basic_index_coupling_convert = [ - (Node(0), Node(1)), - (Node(2), Node(1)), - (Node(2), Node(3)), - (Node(4), Node(3)), - ] - assert basic_index_architecture.coupling == basic_index_coupling_convert - - node_0 = Node("example_register", 0) - node_1 = Node("example_register", 1) - node_2 = Node("example_register", 2) - node_3 = Node("example_register", 3) - basic_uid_coupling = [(node_0, node_1), (node_1, node_2), (node_2, node_3)] - basic_uid_architecture = Architecture(basic_uid_coupling) - assert basic_uid_architecture.coupling == basic_uid_coupling - - square_arc = SquareGrid(2, 2, 2) - assert square_arc.nodes[0] == Node("gridNode", [0, 0, 0]) - assert square_arc.coupling[0] == ( - Node("gridNode", [0, 0, 0]), - Node("gridNode", [0, 1, 0]), - ) - - -def test_architecture_eq() -> None: - coupling = [(1, 2), (3, 4), (0, 6), (0, 3)] - arc = Architecture(coupling) - - assert arc != Architecture([]) - assert arc == Architecture(coupling) - assert arc == Architecture([(Node(i), Node(j)) for (i, j) in coupling]) - assert arc != Architecture([(Node("s", i), Node("s", j)) for (i, j) in coupling]) - - # only Node IDs and coupling matters - g00, g01, g10, g11 = [ - Node("gridNode", [i, j, 0]) for i in range(2) for j in range(2) - ] - sq_arc = Architecture([(g00, g01), (g01, g11), (g00, g10), (g10, g11)]) - assert sq_arc == SquareGrid(2, 2) - assert sq_arc != Architecture([(g00, g01), (g01, g11), (g00, g10)]) - - -def test_fully_connected() -> None: - fc = FullyConnected(3) - assert fc.nodes == [Node("fcNode", i) for i in range(3)] - d = fc.to_dict() - fc1 = FullyConnected.from_dict(d) - assert fc == fc1 - - -def test_arch_types() -> None: - arch = Architecture([(0, 1)]) - assert isinstance(arch, Architecture) - assert isinstance(arch, NodeGraph) - fc = FullyConnected(2) - assert isinstance(fc, FullyConnected) - assert isinstance(fc, NodeGraph) - sg = SquareGrid(2, 2, 2) - assert isinstance(sg, SquareGrid) - assert isinstance(sg, NodeGraph) - - -def test_placements() -> None: - test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] - test_architecture = Architecture(test_coupling) - circ = Circuit(6) - for pair in test_coupling: - circ.CX(pair[0], pair[1]) - circ_qbs = circ.qubits - base_pl = Placement(test_architecture) - line_pl = LinePlacement(test_architecture) - graph_pl = GraphPlacement(test_architecture) - base_placed = circ.copy() - line_placed = circ.copy() - graph_placed = circ.copy() - - base_map = base_pl.get_placement_map(circ) - line_map = line_pl.get_placement_map(circ) - graph_map = graph_pl.get_placement_map(circ) - - assert base_map != line_map - assert base_map != graph_map - assert circ.qubits 
== circ_qbs - - base_pl.place(base_placed) - line_pl.place(line_placed) - graph_pl.place(graph_placed) - - assert line_placed.qubits[0] == line_map[circ_qbs[0]] - assert line_placed.qubits[1] == line_map[circ_qbs[1]] - assert line_placed.qubits[2] == line_map[circ_qbs[2]] - - assert base_placed.qubits[0] == base_map[circ_qbs[0]] - assert base_placed.qubits[1] == base_map[circ_qbs[1]] - assert base_placed.qubits[2] == base_map[circ_qbs[2]] - - assert graph_placed.qubits[0] == graph_map[circ_qbs[0]] - assert graph_placed.qubits[1] == graph_map[circ_qbs[1]] - assert graph_placed.qubits[2] == graph_map[circ_qbs[2]] - - assert circ_qbs != base_placed.qubits - assert circ_qbs != line_placed.qubits - assert circ_qbs != graph_placed.qubits - - base_placed = route(base_placed, test_architecture) - line_placed = route(line_placed, test_architecture) - graph_placed = route(graph_placed, test_architecture) - - assert base_placed.valid_connectivity(test_architecture, False) - assert line_placed.valid_connectivity(test_architecture, False) - assert graph_placed.valid_connectivity(test_architecture, False) - - -def test_placements_serialization() -> None: - with open( - Path(__file__).resolve().parent / "json_test_files" / "placements.json", "r" - ) as f: - dict = json.load(f) - base_pl_serial = dict["base_placement"] - line_pl_serial = dict["line_placement"] - graph_pl_serial = dict["graph_placement"] - noise_pl_serial = dict["noise_placement"] - - assert Placement.from_dict(base_pl_serial).to_dict() == base_pl_serial - assert LinePlacement.from_dict(line_pl_serial).to_dict() == line_pl_serial - assert GraphPlacement.from_dict(graph_pl_serial).to_dict() == graph_pl_serial - assert NoiseAwarePlacement.from_dict(noise_pl_serial).to_dict() == noise_pl_serial - - -def test_placement_config() -> None: - test_coupling = [(0, 1), (1, 2), (1, 3), (4, 1), (4, 5)] - test_architecture = Architecture(test_coupling) - test_pl = GraphPlacement(test_architecture) - test_circuit = Circuit(6) - test_circuit.CX(0, 1) - test_circuit.CX(2, 3) - test_circuit.CX(4, 3) - test_circuit.CX(2, 4) - test_circuit.CX(3, 5) - test_circuit.CX(0, 5) - circ1 = test_circuit.copy() - circ2 = test_circuit.copy() - map1 = test_pl.get_placement_map(test_circuit) - test_pl.place(circ1) - test_pl.modify_config( - max_matches=1, depth_limit=0, max_interaction_edges=2, timeout=100 - ) - map2 = test_pl.get_placement_map(test_circuit) - test_pl.place(circ2) - assert map1 != map2 - circ1 = route(circ1, test_architecture) - circ2 = route(circ2, test_architecture) - assert circ1.n_gates < circ2.n_gates - - -def test_convert_index_mapping() -> None: - test_circuit = Circuit(6) - test_circuit.CX(0, 1) - test_circuit.CX(2, 3) - test_circuit.CX(4, 3) - test_circuit.CX(2, 4) - test_circuit.CX(3, 5) - test_circuit.CX(0, 5) - - c0 = test_circuit.copy() - c1 = test_circuit.copy() - - index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} - uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} - circ_qbs = test_circuit.qubits - assert uid_map[circ_qbs[0]] == Node(1) - assert uid_map[circ_qbs[1]] == Node(2) - assert uid_map[circ_qbs[2]] == Node(0) - assert uid_map[circ_qbs[3]] == Node(4) - assert uid_map[circ_qbs[4]] == Node(3) - - place_with_map(test_circuit, uid_map) - - new_circ_qbs = test_circuit.qubits - assert circ_qbs != new_circ_qbs - assert new_circ_qbs[0] == Node(0) - assert new_circ_qbs[1] == Node(1) - assert new_circ_qbs[2] == Node(2) - assert new_circ_qbs[3] == Node(3) - assert new_circ_qbs[4] == Node(4) - assert new_circ_qbs[5] == Qubit("unplaced", 0) 
- - index_map_0 = {0: 5, 1: 4, 2: 0, 3: 1, 4: 3, 5: 2} - index_map_1 = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3, 5: 5} - uid_0 = {Qubit(i): Node(j) for i, j in index_map_0.items()} - uid_1 = {Qubit(i): Node(j) for i, j in index_map_1.items()} - assert uid_0 != uid_1 - - place_with_map(c0, uid_0) - place_with_map(c1, uid_1) - assert c0 != c1 - - -def test_basic_routing() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - init_map = dict() - init_map[Qubit(0)] = Node(0) - init_map[Qubit(1)] = Node(1) - init_map[Qubit(2)] = Node(2) - init_map[Qubit(3)] = Node(3) - init_map[Qubit(4)] = Node(4) - pl = Placement(arc) - pl.place_with_map(circ, init_map) - out_circ = route(circ, arc, swap_lookahead=50) - assert out_circ.valid_connectivity(arc, False) - assert len(out_circ.get_commands()) == 10 - - -def test_basic_routing_with_line_map() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - lp = LinePlacement(arc) - lp.place(circ) - out_circ = route(circ, arc) - assert out_circ.valid_connectivity(arc, False) - assert len(out_circ.get_commands()) == 6 - - -def test_basic_routing_with_noise_map() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - oq_fids = [ - [Node(0), 0.999], - [Node(1), 0.999], - [Node(2), 0.999], - [Node(3), 0.999], - [Node(4), 0.999], - ] - tq_fids = [ - [[Node(0), Node(1)], 0.9], - [[Node(1), Node(0)], 0.9], - [[Node(1), Node(2)], 0.89], - [[Node(2), Node(1)], 0.89], - [[Node(2), Node(3)], 0.7], - [[Node(3), Node(2)], 0.7], - [[Node(3), Node(4)], 0.59], - [[Node(4), Node(3)], 0.59], - ] - - tq_errs_dict = { - (Node(0), Node(1)): 0.1, - (Node(1), Node(0)): 0.1, - (Node(1), Node(2)): 0.11, - (Node(2), Node(1)): 0.11, - (Node(2), Node(3)): 0.3, - (Node(3), Node(2)): 0.3, - (Node(3), Node(4)): 0.41, - (Node(4), Node(3)): 0.41, - } - oq_errs_dict = {node: 1.0 - value for node, value in oq_fids} - - nap = NoiseAwarePlacement(arc, oq_errs_dict, tq_errs_dict) - nap.place(circ) - out_circ = route(circ, arc) - assert len(out_circ.get_commands()) == 6 - assert out_circ.valid_connectivity(arc, False) - - -def test_greedy_noise_route() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - oq_fids = [ - [Node(0), 0.999], - [Node(1), 0.999], - [Node(2), 0.999], - [Node(3), 0.999], - [Node(4), 0.999], - ] - - tq_errs_dict = { - (Node(0), Node(1)): 0.1, - (Node(1), Node(0)): 0.1, - (Node(1), Node(2)): 0.11, - (Node(2), Node(1)): 0.11, - (Node(2), Node(3)): 0.3, - (Node(3), Node(2)): 0.3, - (Node(3), Node(4)): 0.41, - (Node(4), Node(3)): 0.41, - } - oq_errs_dict = {node: 1.0 - value for node, value in oq_fids} - nap = NoiseAwarePlacement(arc, oq_errs_dict, tq_errs_dict) - nap.place(circ) - out_circ = route(circ, arc) - - assert len(out_circ.get_commands()) == 6 - assert out_circ.valid_connectivity(arc, False) - - -def test_decompose_swap_to_cx() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - init_map = dict() - init_map[Qubit(0)] = Node(0) - init_map[Qubit(1)] = Node(1) - init_map[Qubit(2)] = Node(2) - init_map[Qubit(3)] = Node(3) - 
init_map[Qubit(4)] = Node(4) - - pl = Placement(arc) - pl.place_with_map(circ, init_map) - - out_circ = route(circ, arc) - assert out_circ.valid_connectivity(arc, False) - Transform.DecomposeSWAPtoCX(arc).apply(out_circ) - assert len(out_circ.get_commands()) == 20 - Transform.DecomposeCXDirected(arc).apply(out_circ) - assert out_circ.valid_connectivity(arc, True) - assert len(out_circ.get_commands()) == 40 - - -def test_commuting_sq_through_swap() -> None: - circ = Circuit(5) - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ.H(0) - circ.H(1) - circ.H(2) - circ.H(3) - circ.H(4) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(0, 4) - - init_map = dict() - init_map[Qubit(0)] = Node(0) - init_map[Qubit(1)] = Node(1) - init_map[Qubit(2)] = Node(2) - init_map[Qubit(3)] = Node(3) - init_map[Qubit(4)] = Node(4) - - out_circ = route(circ, arc, initial_mapping=init_map) - assert out_circ.valid_connectivity(arc, False) - # oq_fidelities = [ - # [Node(0), OpType.H, 0.9], - # [Node(1), OpType.H, 0.3], - # [Node(2), OpType.H, 0.5], - # [Node(3), OpType.H, 0.67], - # [Node(4), OpType.H, 0.99999], - # ] - - # _commute_single_gates_through_swaps(out_circ,arc,oq_fidelities) TODO: UN COMMENT WHEN THE DEVICE CLASS IS EXPOSED!! - # Transform.CommuteSQThroughSWAP(devi).apply(out_circ) - - Transform.DecomposeSWAPtoCX(arc).apply(out_circ) - Transform.DecomposeCXDirected(arc).apply(out_circ) - assert out_circ.valid_connectivity(arc, True) - - -def test_noncontiguous_arc() -> None: - arc = Architecture([[0, 2]]) - pass1 = DefaultMappingPass(arc) - c = Circuit(2) - pass1.apply(c) - - -def test_noncontiguous_arc_phase_poly() -> None: - # testing non-contiguous ascending named nodes - arc = Architecture([[0, 2]]) - pass1 = AASRouting(arc, lookahead=1) - c = Circuit(2).H(0).H(1) - pass1.apply(c) - assert c.n_gates_of_type(OpType.H) == 2 - assert c.n_gates_of_type(OpType.CX) == 0 - assert c.n_gates_of_type(OpType.CX) == 0 - - -def test_RoutingPass() -> None: - arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - circ = Circuit(5) - circ.CX(0, 1) - circ.CX(0, 3) - circ.CX(2, 4) - circ.CX(1, 4) - circ.CX(1, 3) - circ.CX(1, 2) - cu_0 = CompilationUnit(circ) - cu_1 = CompilationUnit(circ) - placer = GraphPlacement(arc) - p_pass = PlacementPass(placer) - r_pass_0 = RoutingPass(arc, swap_lookahead=10, bridge_interactions=10) - r_pass_1 = RoutingPass(arc, swap_lookahead=10, bridge_interactions=0) - p_pass.apply(cu_0) - p_pass.apply(cu_1) - r_pass_0.apply(cu_0) - r_pass_1.apply(cu_1) - out_circ_0 = cu_0.circuit - out_circ_1 = cu_1.circuit - # TODO Should we expect BRIDGE gates in out_circ_0? If not, replace with an example - # where we would. See See https://github.com/CQCL-DEV/tket/pull/747. 
- # assert out_circ_0.n_gates_of_type(OpType.BRIDGE) == 1 - assert out_circ_0.valid_connectivity(arc, False, True) - assert out_circ_1.n_gates_of_type(OpType.BRIDGE) == 0 - assert out_circ_1.valid_connectivity(arc, False, True) - - -def test_FullMappingPass() -> None: - arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - circ = Circuit(5) - circ.CX(0, 1).CX(0, 3).CX(2, 4).CX(1, 4).CX(0, 4).CX(2, 1).CX(3, 0) - cu_0 = CompilationUnit(circ) - cu_1 = CompilationUnit(circ) - gp_placer = GraphPlacement(arc) - lp_placer = LinePlacement(arc) - m_pass_0 = FullMappingPass( - arc, gp_placer, swap_lookahead=10, bridge_interactions=10 - ) - m_pass_1 = FullMappingPass(arc, lp_placer) - m_pass_0.apply(cu_0) - m_pass_1.apply(cu_1) - out_circ_0 = cu_0.circuit - out_circ_1 = cu_1.circuit - assert out_circ_0.n_gates < out_circ_1.n_gates - assert out_circ_0.valid_connectivity(arc, False, True) - assert out_circ_1.valid_connectivity(arc, False, True) - - -def test_AAS() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_2() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc) - assert pass1.apply(circ) - - -def test_AAS_3() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_4() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc) - assert pass1.apply(circ) - - -def test_AAS_5() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_6() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc) - assert pass1.apply(circ) - - -def test_AAS_7() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.H(0).H(2) - circ.CX(0, 1).CX(1, 2).CX(3, 4) - circ.Rz(0, 1) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_8() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - circ = Circuit(5) - circ.CX(0, 1) - circ.H(0) - circ.Z(1) - circ.CX(0, 3) - circ.Rx(1.5, 3) - circ.CX(2, 4) - circ.X(2) - circ.CX(1, 4) - circ.CX(0, 4) - pass1 = AASRouting(arc, lookahead=2) - assert pass1.apply(circ) - - -def test_AAS_9() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8]]) - circ = Circuit(9) - circ.CX(0, 8).CX(8, 1).CX(1, 7).CX(7, 2).CX(2, 6).CX(6, 3).CX(3, 5).CX(5, 4) - circ.Rz(0.5, 4) - pass1 = AASRouting(arc, lookahead=2) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() < 56 - - -def test_AAS_10() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, 
lookahead=2) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() < 33 - - -def test_AAS_11() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.SWAP) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 119 - - -def test_AAS_12() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.HamPath) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 36 - - -def test_AAS_13() -> None: - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]) - circ = Circuit(7) - circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3) - circ.Rz(0.5, 3) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 28 - - -def test_AAS_14() -> None: - arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) - circ = Circuit(3).CZ(0, 1) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 3 - - -def test_AAS_15() -> None: - arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]]) - circ = Circuit(2).CZ(0, 1) - pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec) - cu = CompilationUnit(circ) - assert pass1.apply(cu) - out_circ = cu.circuit - assert out_circ.valid_connectivity(arc, False, True) - assert out_circ.depth() == 3 - - -def test_CXMappingPass() -> None: - arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) - circ = Circuit(5) - circ.Y(4).CX(0, 1).S(3).CX(0, 3).H(0).CX(2, 4).CX(1, 4).Y(1).CX(0, 4).CX(2, 1).Z( - 2 - ).CX(3, 0).CX(2, 0).CX(1, 3) - circ.measure_all() - cu_0 = CompilationUnit(circ) - cu_1 = CompilationUnit(circ) - gp_placer = GraphPlacement(arc) - lp_placer = LinePlacement(arc) - m_pass_0 = CXMappingPass( - arc, gp_placer, swap_lookahead=10, bridge_interactions=10, directed_cx=True - ) - m_pass_1 = CXMappingPass(arc, lp_placer, delay_measures=False) - m_pass_0.apply(cu_0) - m_pass_1.apply(cu_1) - out_circ_0 = cu_0.circuit - out_circ_1 = cu_1.circuit - - measure_pred = NoMidMeasurePredicate() - assert measure_pred.verify(cu_0.circuit) == True - assert measure_pred.verify(cu_1.circuit) == False - assert out_circ_0.valid_connectivity(arc, True) - assert out_circ_1.valid_connectivity(arc, False) - - -def test_CXMappingPass_correctness() -> None: - # TKET-1045 - arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) - placer = NoiseAwarePlacement(arc) - p = CXMappingPass(arc, placer, directed_cx=True, delay_measures=True) - c = Circuit(3).CX(0, 1).CX(1, 2).CCX(2, 1, 0).CY(1, 0).CY(2, 1) - cu = CompilationUnit(c) - p.apply(cu) - c1 = cu.circuit - u1 = c1.get_unitary() - assert all(np.isclose(abs(x), 0) or np.isclose(abs(x), 1) for x 
in u1.flatten()) - - -def test_place_with_map_twice() -> None: - # TKET-671 - c = Circuit(6).CX(0, 1).CX(2, 3).CX(4, 3).CX(2, 4).CX(3, 5).CX(0, 5) - - index_map = {0: 1, 1: 2, 2: 0, 3: 4, 4: 3} - uid_map = {Qubit(i): Node(j) for i, j in index_map.items()} - c_qbs = c.qubits - assert uid_map[c_qbs[0]] == Node(1) - assert uid_map[c_qbs[1]] == Node(2) - assert uid_map[c_qbs[2]] == Node(0) - assert uid_map[c_qbs[3]] == Node(4) - assert uid_map[c_qbs[4]] == Node(3) - - assert all(qb.reg_name == "q" for qb in c.qubits) - place_with_map(c, uid_map) - assert all(qb.reg_name in ["node", "unplaced"] for qb in c.qubits) - place_with_map(c, uid_map) - assert all(qb.reg_name == "unplaced" for qb in c.qubits) - - -def test_big_placement() -> None: - # TKET-1275 - c = circuit_from_qasm( - Path(__file__).resolve().parent / "qasm_test_files" / "test14.qasm" - ) - arc = Architecture( - [ - [0, 1], - [0, 14], - [1, 0], - [1, 2], - [1, 13], - [2, 1], - [2, 3], - [2, 12], - [3, 2], - [3, 4], - [3, 11], - [4, 3], - [4, 5], - [4, 10], - [5, 4], - [5, 6], - [5, 9], - [6, 5], - [6, 8], - [7, 8], - [8, 6], - [8, 7], - [8, 9], - [9, 5], - [9, 8], - [9, 10], - [10, 4], - [10, 9], - [10, 11], - [11, 3], - [11, 10], - [11, 12], - [12, 2], - [12, 11], - [12, 13], - [13, 1], - [13, 12], - [13, 14], - [14, 0], - [14, 13], - ] - ) - assert PauliSimp().apply(c) - assert DefaultMappingPass(arc).apply(c) - - -def test_CXMappingPass_terminates() -> None: - # TKET-1376 - c = circuit_from_qasm( - Path(__file__).resolve().parent / "qasm_test_files" / "test13.qasm" - ) - arc = Architecture( - [ - [0, 1], - [1, 0], - [1, 2], - [1, 4], - [2, 1], - [2, 3], - [3, 2], - [3, 5], - [4, 1], - [4, 7], - [5, 3], - [5, 8], - [6, 7], - [7, 4], - [7, 6], - [7, 10], - [8, 5], - [8, 9], - [8, 11], - [9, 8], - [10, 7], - [10, 12], - [11, 8], - [11, 14], - [12, 10], - [12, 13], - [12, 15], - [13, 12], - [13, 14], - [14, 11], - [14, 13], - [14, 16], - [15, 12], - [15, 18], - [16, 14], - [16, 19], - [17, 18], - [18, 15], - [18, 17], - [18, 21], - [19, 16], - [19, 20], - [19, 22], - [20, 19], - [21, 18], - [21, 23], - [22, 19], - [22, 25], - [23, 21], - [23, 24], - [24, 23], - [24, 25], - [25, 22], - [25, 24], - [25, 26], - [26, 25], - ] - ) - placer = NoiseAwarePlacement(arc) - placer.modify_config(timeout=10000) - p = CXMappingPass(arc, placer, directed_cx=False, delay_measures=False) - assert p.apply(c) - - -if __name__ == "__main__": - test_architectures() - test_placements() - test_placement_config() - test_convert_index_mapping() - test_basic_routing() - test_basic_routing_with_line_map() - test_commuting_sq_through_swap() - test_decompose_swap_to_cx() - test_greedy_noise_route() - test_basic_routing_with_noise_map() - test_noncontiguous_arc() - test_noncontiguous_arc_phase_poly() - test_RoutingPass() - test_FullMappingPass() - test_CXMappingPass() - test_place_with_map_twice() diff --git a/pytket/tests/simulator/tket_sim_backend.py b/pytket/tests/simulator/tket_sim_backend.py index a70ce314e3..6ba66d6c86 100644 --- a/pytket/tests/simulator/tket_sim_backend.py +++ b/pytket/tests/simulator/tket_sim_backend.py @@ -31,12 +31,12 @@ from pytket.backends.status import CircuitStatus, StatusEnum from pytket.passes import ( # type: ignore BasePass, - RebasePyZX, SequencePass, SynthesiseTket, FullPeepholeOptimise, DecomposeBoxes, SimplifyInitial, + auto_rebase_pass, ) from pytket.predicates import ( # type: ignore GateSetPredicate, @@ -50,6 +50,20 @@ from pytket.utils.results import probs_from_state +_GATE_SET = { + OpType.SWAP, + OpType.CX, + OpType.CZ, 
+ OpType.Rz, + OpType.Rx, + OpType.S, + OpType.T, + OpType.S, + OpType.X, + OpType.H, +} + + class TketSimBackend(Backend): """Backend for running simulations with tket_sim.""" @@ -74,7 +88,7 @@ def required_predicates(self) -> List[Predicate]: ] def rebase_pass(self) -> BasePass: - return RebasePyZX() + return auto_rebase_pass(_GATE_SET) def default_compilation_pass(self, optimisation_level: int = 1) -> BasePass: assert optimisation_level in range(3) diff --git a/pytket/tests/strategies.py b/pytket/tests/strategies.py index 25456c842c..de3929df58 100644 --- a/pytket/tests/strategies.py +++ b/pytket/tests/strategies.py @@ -26,7 +26,7 @@ from pytket import Circuit, Qubit, Bit from pytket._tket.circuit import BasisOrder, Node, OpType # type: ignore -from pytket._tket.routing import Architecture # type: ignore +from pytket._tket.architecture import Architecture # type: ignore from pytket.pauli import Pauli, QubitPauliString # type: ignore from pytket.utils import QubitPauliOperator from pytket.utils.results import KwargTypes @@ -45,10 +45,19 @@ def qubits( draw: Callable, name: SearchStrategy[str] = st.from_regex(reg_name_regex, fullmatch=True), index: SearchStrategy[int] = uint32, -) -> Bit: +) -> Qubit: return Qubit(draw(name), draw(index)) +@st.composite +def nodes( + draw: Callable, + name: SearchStrategy[str] = st.from_regex(reg_name_regex, fullmatch=True), + index: SearchStrategy[int] = uint32, +) -> Node: + return Node(draw(name), draw(index)) + + @st.composite def circuits( draw: Callable[[SearchStrategy[Any]], Any], @@ -132,30 +141,63 @@ def architecture( return Architecture(draw(edges)) +@st.composite +def optypes(draw: Callable[[SearchStrategy[Any]], Any]) -> OpType: + return OpType(draw(st.integers(min_value=6, max_value=49))) + + +@st.composite +def errors(draw: Callable[[SearchStrategy[Any]], Any]) -> Any: + return draw(st.floats(min_value=0.0, max_value=1.0)) + + +@st.composite +def optype_errors(draw: Callable[[SearchStrategy[Any]], Any]) -> Any: + return draw(st.dictionaries(optypes(), errors())) + + +@st.composite +def edges(draw: Callable[[SearchStrategy[Any]], Any]) -> Any: + return draw(st.tuples(nodes(), nodes())) + + @st.composite def backendinfo( draw: Callable[[SearchStrategy[Any]], Any], ) -> BackendInfo: - optypes = [OpType(i) for i in range(6, 54)] name = draw(st.text(min_size=1, max_size=30)) device_name = draw(st.text(min_size=1, max_size=30)) version = draw(st.text(min_size=1, max_size=5)) # hardware constraints arc = draw(architecture()) - gate_set = draw(st.sets(st.sampled_from(optypes))) + gate_set = draw(st.sets(optypes())) supports_fast_feedforward = draw(st.booleans()) supports_reset = draw(st.booleans()) supports_midcircuit_measurement = draw(st.booleans()) + all_node_gate_errors = draw(st.dictionaries(nodes(), optype_errors())) + all_edge_gate_errors = draw(st.dictionaries(edges(), optype_errors())) + all_readout_errors = draw(st.dictionaries(nodes(), st.lists(st.lists(errors())))) + averaged_node_gate_errors = draw(st.dictionaries(nodes(), errors())) + averaged_edge_gate_errors = draw(st.dictionaries(edges(), errors())) + averaged_readout_errors = draw(st.dictionaries(nodes(), errors())) + misc = draw(st.dictionaries(st.text(), st.text())) return BackendInfo( - name, - device_name, - version, - arc, - gate_set, - supports_fast_feedforward, - supports_reset, - supports_midcircuit_measurement, + name=name, + device_name=device_name, + version=version, + architecture=arc, + gate_set=gate_set, + supports_fast_feedforward=supports_fast_feedforward, + 
supports_reset=supports_reset, + supports_midcircuit_measurement=supports_midcircuit_measurement, + all_node_gate_errors=all_node_gate_errors, + all_edge_gate_errors=all_edge_gate_errors, + all_readout_errors=all_readout_errors, + averaged_node_gate_errors=averaged_node_gate_errors, + averaged_edge_gate_errors=averaged_edge_gate_errors, + averaged_readout_errors=averaged_readout_errors, + misc=misc, ) diff --git a/pytket/tests/transform_test.py b/pytket/tests/transform_test.py index 14d5d3afd8..a2efb31bfe 100644 --- a/pytket/tests/transform_test.py +++ b/pytket/tests/transform_test.py @@ -12,13 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. +import itertools +from typing import List from pathlib import Path -from pytket.circuit import Circuit, OpType, PauliExpBox # type: ignore +from pytket.circuit import Circuit, OpType, PauliExpBox, Node, Qubit # type: ignore +from pytket._tket.circuit import _library # type: ignore from pytket.pauli import Pauli # type: ignore -from pytket.passes import RemoveRedundancies, KAKDecomposition, ThreeQubitSquash, CommuteThroughMultis, PauliSquash, FullPeepholeOptimise, GlobalisePhasedX # type: ignore -from pytket.predicates import CompilationUnit # type: ignore +from pytket.passes import ( # type: ignore + RemoveRedundancies, + KAKDecomposition, + SquashCustom, + CommuteThroughMultis, + RebaseCustom, + PauliSquash, + FullPeepholeOptimise, + DefaultMappingPass, + FullMappingPass, + RoutingPass, + PlacementPass, + CXMappingPass, + auto_rebase_pass, + auto_squash_pass, +) +from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore +from pytket.passes.auto_rebase import _CX_CIRCS, NoAutoRebase from pytket.transform import Transform, CXConfigType, PauliSynthStrat # type: ignore from pytket.qasm import circuit_from_qasm +from pytket.architecture import Architecture # type: ignore +from pytket.mapping import MappingManager, LexiRouteRoutingMethod, LexiLabellingMethod # type: ignore +from pytket.placement import Placement, GraphPlacement, LinePlacement, NoiseAwarePlacement # type: ignore from sympy import Symbol # type: ignore import numpy as np @@ -618,7 +640,7 @@ def test_cu3_removal() -> None: def test_symbol_squash() -> None: # Test simplification of symbolic angles when squashing. 
- a = Symbol("a") + a = Symbol("a") # type: ignore circ = Circuit(1) circ.Ry(0.5, 0).Rz(a, 0).Ry(0.5, 0) circ1 = circ.copy() @@ -642,7 +664,7 @@ def test_symbol_squash() -> None: def symbolic_test_circ(n: int) -> Circuit: - a = Symbol("a") + a = Symbol("a") # type: ignore circ = Circuit(n) for i in range(n - 1, 0, -1): circ.CX(i, i - 1) @@ -663,7 +685,7 @@ def test_symbol_pauli_squash_1() -> None: circ1 = circ.copy() assert PauliSquash().apply(circ1) for x in np.arange(0.0, 4.0, 0.4): - smap = {Symbol("a"): x} + smap = {Symbol("a"): x} # type: ignore c = circ.copy() c.symbol_substitution(smap) u = c.get_unitary() @@ -681,7 +703,7 @@ def test_symbol_pauli_squash_2() -> None: circ1 = circ.copy() assert PauliSquash().apply(circ1) for x in np.arange(0.0, 4.0, 0.4): - smap = {Symbol("a"): x} + smap = {Symbol("a"): x} # type: ignore c = circ.copy() c.symbol_substitution(smap) u = c.get_unitary() @@ -729,6 +751,314 @@ def test_full_peephole_optimise() -> None: assert n_cx1 < n_cz +def test_decompose_swap_to_cx() -> None: + circ = Circuit(5) + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + circ.CX(0, 1) + circ.CX(0, 3) + circ.CX(2, 4) + circ.CX(1, 4) + circ.CX(0, 4) + + init_map = dict() + init_map[Qubit(0)] = Node(0) + init_map[Qubit(1)] = Node(1) + init_map[Qubit(2)] = Node(2) + init_map[Qubit(3)] = Node(3) + init_map[Qubit(4)] = Node(4) + + pl = Placement(arc) + pl.place_with_map(circ, init_map) + + MappingManager(arc).route_circuit( + circ, [LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) + assert circ.valid_connectivity(arc, False) + Transform.DecomposeSWAPtoCX(arc).apply(circ) + assert len(circ.get_commands()) == 20 + Transform.DecomposeCXDirected(arc).apply(circ) + assert circ.valid_connectivity(arc, True) + assert len(circ.get_commands()) == 40 + + +def test_noncontiguous_DefaultMappingPass_arc() -> None: + arc = Architecture([[0, 2]]) + pass1 = DefaultMappingPass(arc) + c = Circuit(2) + pass1.apply(c) + + +def test_RoutingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + circ.CX(0, 1) + circ.CX(0, 3) + circ.CX(2, 4) + circ.CX(1, 4) + circ.CX(1, 3) + circ.CX(1, 2) + cu_0 = CompilationUnit(circ) + placer = GraphPlacement(arc) + p_pass = PlacementPass(placer) + r_pass_0 = RoutingPass(arc) + p_pass.apply(cu_0) + r_pass_0.apply(cu_0) + out_circ_0 = cu_0.circuit + assert out_circ_0.valid_connectivity(arc, False, True) + + +def test_FullMappingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + circ.CX(0, 1).CX(0, 3).CX(2, 4).CX(1, 4).CX(0, 4).CX(2, 1).CX(3, 0) + cu_0 = CompilationUnit(circ) + cu_1 = CompilationUnit(circ) + gp_placer = GraphPlacement(arc) + lp_placer = LinePlacement(arc) + m_pass_0 = FullMappingPass( + arc, gp_placer, [LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) + m_pass_1 = FullMappingPass( + arc, lp_placer, [LexiLabellingMethod(), LexiRouteRoutingMethod()] + ) + m_pass_0.apply(cu_0) + m_pass_1.apply(cu_1) + out_circ_0 = cu_0.circuit + out_circ_1 = cu_1.circuit + assert out_circ_0.n_gates < out_circ_1.n_gates + assert out_circ_0.valid_connectivity(arc, False, True) + assert out_circ_1.valid_connectivity(arc, False, True) + + +def test_CXMappingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + circ.Y(4).CX(0, 1).S(3).CX(0, 3).H(0).CX(2, 4).CX(1, 4).Y(1).CX(0, 4).CX(2, 1).Z( + 2 + ).CX(3, 0).CX(2, 0).CX(1, 3) + circ.measure_all() + cu_0 = CompilationUnit(circ) + cu_1 = CompilationUnit(circ) + gp_placer = GraphPlacement(arc) + 
lp_placer = LinePlacement(arc) + m_pass_0 = CXMappingPass( + arc, gp_placer, swap_lookahead=10, bridge_interactions=10, directed_cx=True + ) + m_pass_1 = CXMappingPass(arc, lp_placer, delay_measures=False) + m_pass_0.apply(cu_0) + m_pass_1.apply(cu_1) + out_circ_0 = cu_0.circuit + out_circ_1 = cu_1.circuit + + measure_pred = NoMidMeasurePredicate() + assert measure_pred.verify(cu_0.circuit) == True + assert measure_pred.verify(cu_1.circuit) == False + assert out_circ_0.valid_connectivity(arc, True) + assert out_circ_1.valid_connectivity(arc, False) + + +def test_DefaultMappingPass() -> None: + arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]]) + circ = Circuit(5) + circ.Y(4).CX(0, 1).S(3).CX(0, 3).H(0).CX(2, 4).CX(1, 4).Y(1).CX(0, 4).CX(2, 1).Z( + 2 + ).CX(3, 0).CX(2, 0).CX(1, 3).CX(1, 2) + circ.measure_all() + cu_0 = CompilationUnit(circ) + cu_1 = CompilationUnit(circ) + m_pass_0 = DefaultMappingPass(arc, delay_measures=True) + m_pass_1 = DefaultMappingPass(arc, delay_measures=False) + m_pass_0.apply(cu_0) + m_pass_1.apply(cu_1) + out_circ_0 = cu_0.circuit + out_circ_1 = cu_1.circuit + measure_pred = NoMidMeasurePredicate() + assert measure_pred.verify(out_circ_0) == True + assert measure_pred.verify(out_circ_1) == False + assert out_circ_0.valid_connectivity(arc, False, True) + assert out_circ_1.valid_connectivity(arc, False, True) + + +def test_CXMappingPass_correctness() -> None: + # TKET-1045 + arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]]) + placer = NoiseAwarePlacement(arc) + p = CXMappingPass(arc, placer, directed_cx=True, delay_measures=True) + c = Circuit(3).CX(0, 1).CX(1, 2).CCX(2, 1, 0).CY(1, 0).CY(2, 1) + cu = CompilationUnit(c) + p.apply(cu) + c1 = cu.circuit + u1 = c1.get_unitary() + assert all(np.isclose(abs(x), 0) or np.isclose(abs(x), 1) for x in u1.flatten()) + + +def test_CXMappingPass_terminates() -> None: + # TKET-1376 + c = circuit_from_qasm( + Path(__file__).resolve().parent / "qasm_test_files" / "test13.qasm" + ) + arc = Architecture( + [ + [0, 1], + [1, 0], + [1, 2], + [1, 4], + [2, 1], + [2, 3], + [3, 2], + [3, 5], + [4, 1], + [4, 7], + [5, 3], + [5, 8], + [6, 7], + [7, 4], + [7, 6], + [7, 10], + [8, 5], + [8, 9], + [8, 11], + [9, 8], + [10, 7], + [10, 12], + [11, 8], + [11, 14], + [12, 10], + [12, 13], + [12, 15], + [13, 12], + [13, 14], + [14, 11], + [14, 13], + [14, 16], + [15, 12], + [15, 18], + [16, 14], + [16, 19], + [17, 18], + [18, 15], + [18, 17], + [18, 21], + [19, 16], + [19, 20], + [19, 22], + [20, 19], + [21, 18], + [21, 23], + [22, 19], + [22, 25], + [23, 21], + [23, 24], + [24, 23], + [24, 25], + [25, 22], + [25, 24], + [25, 26], + [26, 25], + ] + ) + placer = NoiseAwarePlacement(arc) + placer.modify_config(timeout=10000) + p = CXMappingPass(arc, placer, directed_cx=False, delay_measures=False) + res = p.apply(c) + assert res + + +def test_auto_rebase() -> None: + pass_params = [ + ({OpType.CX, OpType.Rz, OpType.Rx}, _library._CX(), _library._TK1_to_RzRx), + ( + {OpType.CZ, OpType.Rz, OpType.SX, OpType.ZZPhase}, + _CX_CIRCS[OpType.CZ](), + _library._TK1_to_RzSX, + ), + ( + {OpType.ZZMax, OpType.T, OpType.Rz, OpType.H}, + _library._CX_using_ZZMax(), + _library._TK1_to_RzH, + ), + ( + {OpType.XXPhase, OpType.T, OpType.Rz, OpType.H}, + _library._CX_using_XXPhase_0(), + _library._TK1_to_RzH, + ), + ( + {OpType.ECR, OpType.PhasedX, OpType.Rz, OpType.CnX}, + _library._CX_using_ECR(), + _library._TK1_to_PhasedXRz, + ), + ( + {OpType.CX, OpType.TK1, OpType.U3, OpType.CnX}, + _library._CX(), + _library._TK1_to_TK1, + ), + ] + + circ = 
get_test_circuit() + + for gateset, cx_circ, TK1_func in pass_params: + rebase = auto_rebase_pass(gateset) + assert rebase.to_dict() == RebaseCustom(gateset, cx_circ, TK1_func).to_dict() + + c2 = circ.copy() + assert rebase.apply(c2) + + with pytest.raises(NoAutoRebase) as cx_err: + _ = auto_rebase_pass({OpType.ZZPhase, OpType.TK1}) + assert "CX" in str(cx_err.value) + + with pytest.raises(NoAutoRebase) as cx_err: + _ = auto_rebase_pass({OpType.CX, OpType.H, OpType.T}) + assert "TK1" in str(cx_err.value) + + +def test_auto_squash() -> None: + pass_params = [ + ({OpType.Rz, OpType.Rx}, _library._TK1_to_RzRx), + ( + {OpType.Rz, OpType.SX}, + _library._TK1_to_RzSX, + ), + ( + {OpType.T, OpType.Rz, OpType.H}, + _library._TK1_to_RzH, + ), + ( + {OpType.T, OpType.Rz, OpType.H}, + _library._TK1_to_RzH, + ), + ( + {OpType.PhasedX, OpType.Rz}, + _library._TK1_to_PhasedXRz, + ), + ( + {OpType.TK1, OpType.U3}, + _library._TK1_to_TK1, + ), + ] + + for gateset, TK1_func in pass_params: + circ = Circuit(1) + for gate in itertools.islice(itertools.cycle(gateset), 5): + # make a sequence of 5 gates from gateset to make sure squash does + # something + params: List[float] = [] + while True: + try: + circ.add_gate(gate, params, [0]) + break + except (RuntimeError, TypeError): + params.append(0.1) + squash = auto_squash_pass(gateset) + assert squash.to_dict() == SquashCustom(gateset, TK1_func).to_dict() + + assert squash.apply(circ) + + with pytest.raises(NoAutoRebase) as tk_err: + _ = auto_squash_pass({OpType.H, OpType.T}) + assert "TK1" in str(tk_err.value) + + if __name__ == "__main__": test_remove_redundancies() test_reduce_singles() @@ -746,3 +1076,11 @@ def test_full_peephole_optimise() -> None: test_implicit_swaps_1() test_implicit_swaps_2() test_implicit_swaps_3() + test_decompose_swap_to_cx() + test_noncontiguous_DefaultMappingPass_arc() + test_RoutingPass() + test_DefaultMappingPass() + test_CXMappingPass() + test_CXMappingPass_correctness() + test_CXMappingPass_terminates() + test_FullMappingPass() diff --git a/pytket/tests/utils_test.py b/pytket/tests/utils_test.py index a335f6c567..4b66f6281c 100644 --- a/pytket/tests/utils_test.py +++ b/pytket/tests/utils_test.py @@ -444,7 +444,7 @@ def unitary_circuits(draw: Callable[[SearchStrategy[Any]], Any]) -> Circuit: # available qubits as integers qb_strat = strategies.integers(min_value=0, max_value=n_qb - 1) # some symbols to sample from - syms = symbols("a b c d e") + syms = symbols("a b c d e") # type: ignore c = Circuit(n_qb) optype_dict = { @@ -564,8 +564,8 @@ def test_symbolic_conversion(circ: Circuit) -> None: substitutions = [(sym, val) for sym, val in zip(free_symbs, bind_vals)] circ.symbol_substitution(dict(substitutions)) - sym_unitary = sym_unitary.subs(substitutions) - sym_state = sym_state.subs(substitutions) + sym_unitary = sym_unitary.subs(substitutions) # type: ignore + sym_state = sym_state.subs(substitutions) # type: ignore numeric_unitary = np.array(sym_unitary).astype(np.complex128) numeric_state = np.array(sym_state).astype(np.complex128) diff --git a/pytket/tket b/pytket/tket deleted file mode 100755 index 9e607099eb..0000000000 --- a/pytket/tket +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2019-2022 Cambridge Quantum Computing -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -from importlib import import_module -import textwrap -from pytket.passes import SequencePass -from pytket.predicates import CompilationUnit -from pytket.qasm import circuit_from_qasm, circuit_to_qasm -from pytket.quipper import circuit_from_quipper - -all_passes = [ - "CommuteThroughMultis", - "DecomposeArbitrarilyControlledGates", - "DecomposeBoxes", - "DelayMeasures", - "FlattenRegisters", - "FullPeepholeOptimise", - "KAKDecomposition", - "RebaseCirq", - "RebaseHQS", - "RebaseProjectQ", - "RebasePyZX", - "RebaseQuil", - "RebaseTket", - "RebaseUMD", - "RemoveRedundancies", - "SynthesiseHQS", - "SynthesiseUMD", -] -pass_module = import_module("pytket.passes") -pass_dict = dict((pname, getattr(pass_module, pname)) for pname in all_passes) - -backend_providers = { - "aqt": [ - "AQTBackend", - ], - "pyquil": [ - "ForestBackend", - ], - "honeywell": [ - "HoneywellBackend", - ], - "qiskit": [ - "AerBackend", - "AerStateBackend", - "AerUnitaryBackend", - "IBMQBackend", - ], - "projectq": [ - "ProjectQBackend", - ], - "qsharp": [ - "QsharpEstimatorBackend", - "QsharpSimulatorBackend", - "QsharpToffoliSimulatorBackend", - ], - "qulacs": [ - "QulacsBackend", - "QulacsGPUBackend", - ], - "braket": [ - "BraketBackend", - ], -} -installed_providers = set() -backend_modules = {} -for provider in backend_providers.keys(): - try: - module = import_module("pytket.extensions.{}".format(provider)) - installed_providers.add(provider) - backend_modules[provider] = module - except ModuleNotFoundError: - pass -backend_dict = dict( - (bname, getattr(backend_modules[provider], bname)) - for provider, blist in backend_providers.items() - for bname in blist - if provider in installed_providers -) - -wrapper = textwrap.TextWrapper(subsequent_indent=" " * 8, width=80) - - -def read_input(infile, fmt): - if fmt == "qasm": - return circuit_from_qasm(infile) - elif fmt == "quipper": - return circuit_from_quipper(infile) - - -def write_output(circ, outfile, fmt): - if fmt == "qasm": - circuit_to_qasm(circ, outfile) - elif fmt == "latex": - circ.to_latex_file(outfile) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Compile and run quantum circuits.") - parser.add_argument("--infile", help="Input circuit file.") - parser.add_argument("--outfile", help="Output circuit file.") - parser.add_argument( - "--informat", - default="qasm", - choices=["qasm", "quipper"], - help="Input file format (default = %(default)s).", - ) - parser.add_argument( - "--outformat", - default="qasm", - choices=["qasm", "latex"], - help="Output file format (default = %(default)s).", - ) - parser.add_argument( - "-p", - "--passname", - nargs="*", - help="Apply compilation pass or passes (in order provided). 
If the special name 'default' is provided here, it refers to the default compilation pass for the backend, if a backend is specified (otherwise it is ignored).", - ) - parser.add_argument( - "--list-passes", - action="store_true", - help="List available compilation passes, with brief descriptions.", - ) - parser.add_argument("-b", "--backend", help="Backend on which to run circuit.") - parser.add_argument( - "--list-backends", - action="store_true", - help="List available backends, with brief descriptions.", - ) - parser.add_argument( - "-r", - "--run", - action="store_true", - help="Run the compiled circuit on the backend.", - ) - parser.add_argument( - "-n", "--n-shots", type=int, default=1, help="Number of shots to run." - ) - result_opts = parser.add_mutually_exclusive_group() - result_opts.add_argument( - "--show-counts", - action="store_true", - help="Show a summary of counts from the backend.", - ) - result_opts.add_argument( - "--show-shots", - action="store_true", - help="Show a full table of shots from the backend.", - ) - result_opts.add_argument( - "--show-state", - action="store_true", - help="Show the state vector from all-0 input computed by the backend.", - ) - result_opts.add_argument( - "--show-unitary", - action="store_true", - help="Show the unitary matrix computed by the backend.", - ) - parser.add_argument( - "-v", "--verbose", action="store_true", help="Print verbose output." - ) - args = parser.parse_args() - if args.list_passes: - print("Available passes:") - for pname, p in pass_dict.items(): - pdesc = p.__doc__.strip().split("\n")[-1] - for line in wrapper.wrap("{}: {}".format(pname, pdesc)): - print(line) - if args.list_backends: - print("Available backends:") - for bname, b in backend_dict.items(): - bdesc = ( - b.__doc__.strip().split("\n")[0] - if b.__doc__ - else b.__init__.__doc__.strip().split("\n")[0] - ) - for line in wrapper.wrap("{}: {}".format(bname, bdesc)): - print(line) - if args.infile: - circ = read_input(args.infile, args.informat) - if args.backend: - back = backend_dict[args.backend]() - default_pass = back.default_compilation_pass - if args.passname: - pass_list = [ - default_pass if pname == "default" else pass_dict[pname]() - for pname in args.passname - ] - else: - if args.passname: - pass_list = [ - pass_dict[pname]() for pname in args.passname if pname != "default" - ] - if args.verbose: - print( - "Read circuit from {} ({} qubits, {} gates, depth {}).".format( - args.infile, circ.n_qubits, circ.n_gates, circ.depth() - ) - ) - if args.passname: - cu = CompilationUnit(circ) - seqpass = SequencePass(pass_list) - if args.verbose: - print("Compiling circuit...") - applied = seqpass.apply(cu) - circ1 = cu.circuit - if args.verbose: - if applied: - print( - "Compiled to new circuit ({} qubits, {} gates, depth {}).".format( - circ.n_qubits, circ.n_gates, circ.depth() - ) - ) - else: - print("Circuit unchanged by compilation.") - else: - circ1 = circ - if args.outfile: - write_output(circ1, args.outfile, args.outformat) - if args.verbose: - print("Written new circuit to {}.".format(args.outfile)) - if args.run: - if not args.backend: - raise Exception("Cannot run circuit: no backend provided.") - n_shots = args.n_shots if back.supports_shots else None - back.process_circuits([circ1], n_shots=n_shots) - if args.verbose: - print("Submitted circuit to backend.") - if args.show_counts: - if back.supports_counts: - print(back.get_counts(circ1)) - else: - print("Backend does not support 'counts'.") - elif args.show_shots: - if back.supports_shots: - 
print(back.get_shots(circ1)) - else: - print("Backend does not support 'shots'.") - elif args.show_state: - if back.supports_state: - print(back.get_state(circ1)) - else: - print("Backend does not support 'state'.") - elif args.show_unitary: - if back.supports_unitary: - print(back.get_unitary(circ1)) - else: - print("Backend does not support 'unitary'.") diff --git a/recipes/pybind11/conanfile.py b/recipes/pybind11/conanfile.py index da288e6d8d..9419dc7dfe 100644 --- a/recipes/pybind11/conanfile.py +++ b/recipes/pybind11/conanfile.py @@ -19,7 +19,7 @@ class PyBind11Conan(ConanFile): name = "pybind11" - version = "2.8.1" + version = "2.9.1" description = "Seamless operability between C++11 and Python" topics = "conan", "pybind11", "python", "binding" homepage = "https://github.com/pybind/pybind11" diff --git a/recipes/symengine/conanfile.py b/recipes/symengine/conanfile.py index 8a35b826b8..029851a62c 100644 --- a/recipes/symengine/conanfile.py +++ b/recipes/symengine/conanfile.py @@ -20,7 +20,7 @@ class SymengineConan(ConanFile): name = "symengine" - version = "0.8.1.1" + version = "0.9.0" description = "A fast symbolic manipulation library, written in C++" license = "MIT" topics = ("symbolic", "algebra") @@ -49,12 +49,10 @@ def requirements(self): self.requires("gmp/6.2.1") def source(self): - git = tools.Git() - git.clone( - "https://github.com/symengine/symengine", - branch="2eb109a0e554b62683662cc5559fccf2ea0c0348", - shallow=True, + tools.get( + f"https://github.com/symengine/symengine/releases/download/v{self.version}/symengine-{self.version}.tar.gz" ) + os.rename(f"symengine-{self.version}", "symengine") def _configure_cmake(self): if self._cmake is None: @@ -63,7 +61,9 @@ def _configure_cmake(self): self._cmake.definitions["BUILD_BENCHMARKS"] = False self._cmake.definitions["INTEGER_CLASS"] = self.options.integer_class self._cmake.definitions["MSVC_USE_MT"] = False - self._cmake.configure() + self._cmake.configure( + source_dir=os.path.join(self.source_folder, "symengine") + ) return self._cmake def config_options(self): @@ -76,7 +76,7 @@ def configure(self): def build(self): tools.replace_in_file( - os.path.join(self.source_folder, "CMakeLists.txt"), + os.path.join(self.source_folder, "symengine", "CMakeLists.txt"), "project(symengine)", """project(symengine) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) @@ -86,7 +86,9 @@ def build(self): cmake.build() def package(self): - self.copy("LICENSE", dst="licenses", src=self.source_folder) + self.copy( + "LICENSE", dst="licenses", src=os.path.join(self.source_folder, "symengine") + ) cmake = self._configure_cmake() cmake.install() cmake.patch_config_paths() diff --git a/recipes/tket-tests/conanfile.py b/recipes/tket-tests/conanfile.py index 55330bd070..afd7ab6371 100644 --- a/recipes/tket-tests/conanfile.py +++ b/recipes/tket-tests/conanfile.py @@ -26,11 +26,13 @@ class TketTestsConan(ConanFile): description = "Unit tests for tket" topics = ("quantum", "computation", "compiler") settings = "os", "compiler", "build_type", "arch" - options = {"with_coverage": [True, False]} - default_options = {"with_coverage": False} + options = {"with_coverage": [True, False], "full": [True, False]} + default_options = {"with_coverage": False, "full": False} generators = "cmake" exports_sources = "../../tket/tests/*" - requires = ("tket/1.0.1", "catch2/2.13.7") + requires = ("tket/1.0.1", "catch2/2.13.8") + + _cmake = None def validate(self): if self.options.with_coverage and self.settings.compiler != "gcc": @@ -38,13 +40,19 @@ def validate(self): 
"`with_coverage` option only available with gcc" ) + def _configure_cmake(self): + if self._cmake is None: + self._cmake = CMake(self) + self._cmake.definitions["TESTS_FULL"] = self.options.full + self._cmake.configure() + return self._cmake + def configure(self): if self.options.with_coverage: self.options["tket"].profile_coverage = True def build(self): - cmake = CMake(self) - cmake.configure() + cmake = self._configure_cmake() cmake.build() def package(self): diff --git a/recipes/tket/conanfile.py b/recipes/tket/conanfile.py index fd888d2ccc..10a9f6b6c3 100644 --- a/recipes/tket/conanfile.py +++ b/recipes/tket/conanfile.py @@ -32,7 +32,7 @@ class TketConan(ConanFile): "profile_coverage": [True, False], "spdlog_ho": [True, False], } - default_options = {"shared": False, "profile_coverage": False, "spdlog_ho": True} + default_options = {"shared": False, "profile_coverage": False, "spdlog_ho": False} generators = "cmake" # Putting "patches" in both "exports_sources" and "exports" means that this works # in either the CI workflow (`conan create`) or the development workflow @@ -41,10 +41,11 @@ class TketConan(ConanFile): exports = ["patches/*"] requires = ( "boost/1.78.0", - "symengine/0.8.1.1", + # symengine from remote: https://tket.jfrog.io/artifactory/api/conan/tket-conan + "symengine/0.9.0@tket/stable", "eigen/3.4.0", "spdlog/1.9.2", - "nlohmann_json/3.10.4", + "nlohmann_json/3.10.5", ) comps = [ @@ -63,7 +64,9 @@ class TketConan(ConanFile): "Program", "Characterisation", "Converters", - "Routing", + "TokenSwapping", + "Mapping", + "Placement", "MeasurementSetup", "Transformations", "ArchAwareSynth", @@ -130,6 +133,7 @@ def package(self): for comp in self.comps: self.copy(f"{comp}/include/*.hpp", dst=f"include/{comp}", keep_path=False) self.copy("*.dll", dst="lib", keep_path=False) + self.copy("*.dll", dst="bin", keep_path=False) self.copy("*.lib", dst="lib", keep_path=False) self.copy("*.so", dst="lib", keep_path=False) self.copy("*.dylib", dst="lib", keep_path=False) diff --git a/recipes/tket/test_package/CMakeLists.txt b/recipes/tket/test_package/CMakeLists.txt index 21191629e5..7ffc47f03b 100644 --- a/recipes/tket/test_package/CMakeLists.txt +++ b/recipes/tket/test_package/CMakeLists.txt @@ -39,4 +39,5 @@ target_link_libraries(test PRIVATE tket-Transformations tket-Utils) -target_link_libraries(test PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(test PRIVATE + ${CONAN_LIBS_FMT} ${CONAN_LIBS_SPDLOG} ${CONAN_LIBS_SYMENGINE}) diff --git a/recipes/tket/test_package/conanfile.py b/recipes/tket/test_package/conanfile.py index 986980cbf6..4760d30992 100644 --- a/recipes/tket/test_package/conanfile.py +++ b/recipes/tket/test_package/conanfile.py @@ -13,9 +13,6 @@ # limitations under the License. 
import os -from shutil import copyfile -import platform - from conans import ConanFile, CMake, tools @@ -29,18 +26,7 @@ def build(self): cmake.configure() cmake.build() - def imports(self): - self.copy("*", src="@bindirs", dst="bin") - self.copy("*", src="@libdirs", dst="lib") - def test(self): if not tools.cross_building(self): - lib_files = os.listdir(os.path.join(self.install_folder, "lib")) - for lib_file in lib_files: - if "tket" in lib_file: - copyfile( - os.path.join(self.install_folder, "lib", lib_file), - os.path.join("bin", lib_file), - ) - os.chdir("bin") - self.run(os.path.join(os.curdir, "test")) + bin_path = os.path.join("bin", "test") + self.run(bin_path, run_environment=True) diff --git a/schemas/compiler_pass_v1.json b/schemas/compiler_pass_v1.json index dd59080ebd..0a473e7129 100644 --- a/schemas/compiler_pass_v1.json +++ b/schemas/compiler_pass_v1.json @@ -114,24 +114,17 @@ "type": "string", "description": "The name of the compiler pass. Matches the name of the pytket method used to generate it." }, - "basis_multiqs": { + "basis_allowed": { "type": "array", "items": { "type": "string" }, - "description": "OpTypes of supported multi-qubit gates. Used in \"RebaseCustom\"." + "description": "OpTypes of supported gates. Used in \"RebaseCustom\"." }, "basis_cx_replacement": { "$ref": "file:///circuit_v1.json#", "description": "A circuit implementing a CX gate in a target gate set. Used in \"RebaseCustom\"." }, - "basis_singleqs": { - "type": "array", - "items": { - "type": "string" - }, - "description": "OpTypes of supported single-qubit gates. Used in \"RebaseCustom\" and \"SquashCustom\"." - }, "basis_tk1_replacement": { "type": "string", "description": "A method for generating optimised single-qubit unitary circuits in a target gate set. This string should be interpreted by Python \"dill\" into a function. Used in \"RebaseCustom\" and \"SquashCustom\"." @@ -183,9 +176,6 @@ "architecture": { "$ref": "#/definitions/architecture" }, - "routing_config": { - "$ref": "#/definitions/routing_config" - }, "directed": { "type": "boolean", "description": "Whether to consider directedness of the architecture for CXs in \"DecomposeSwapsToCXs\"." @@ -247,9 +237,8 @@ }, "then": { "required": [ - "basis_multiqs", + "basis_allowed", "basis_cx_replacement", - "basis_singleqs", "basis_tk1_replacement" ] } @@ -414,6 +403,20 @@ ] } }, + { + "if": { + "properties": { + "name": { + "const": "NaivePlacementPass" + } + } + }, + "then": { + "required": [ + "architecture" + ] + } + }, { "if": { "properties": { @@ -484,7 +487,8 @@ }, "then": { "required": [ - "architecture" + "architecture", + "delay_measures" ] } }, @@ -1002,36 +1006,22 @@ "op_link_errors" ] }, - "routing_config": { + "routingmethod": { "type": "object", - "description": "A configuration for the routing procedure.", + "description": "A method used during circuit mapping.", "properties": { - "depth_limit": { - "type": "integer", - "minimum": 0, - "description": "The look ahead limit for SWAP picking." - }, - "distrib_limit": { - "type": "integer", - "minimum": 0, - "description": "The look ahead limit for Distributed CX gate checking." - }, - "interactions_limit": { - "type": "integer", - "description": "The number of interactions considered in Distributed CX gate checking." + "name": { + "type": "string", + "description": "Name of method." }, - "distrib_exponent": { - "type": "integer", - "minimum": 0, - "description": "A factor to balance the consideration for later gates when deciding on Distributed CX gates." 
- } }, - "required": [ - "depth_limit", - "distrib_limit", - "interactions_limit", - "distrib_exponent" - ] - } + }, + "routing_config": { + "type": "array", + "description": "A configuration for routing defined by an array of RoutingMethod.", + "items": { + "$ref": "#/definitions/routingmethod" + }, + }, } } \ No newline at end of file diff --git a/schemas/pytket_config_v1.json b/schemas/pytket_config_v1.json index 6626a45259..34afcbd026 100644 --- a/schemas/pytket_config_v1.json +++ b/schemas/pytket_config_v1.json @@ -4,21 +4,12 @@ "type": "object", "description": "Configuration file for pytket. Stored at ~/.config/pytket/config.json", "properties": { - "enable_telemetry": { - "type": "boolean", - "description": "Whether pytket telemetry is enabled." - }, - "telemetry_id": { - "type": ["string", "null"], - "description": "Unique identifier for system if telemetry is enabled." - }, "extensions": { "type": "object", "description": "Configuration parameters for pytket extension packages." } }, "required": [ - "enable_telemetry", "extensions" ] -} \ No newline at end of file +} diff --git a/tket/proptests/CMakeLists.txt b/tket/proptests/CMakeLists.txt index 529f3c2366..0dcba102b3 100644 --- a/tket/proptests/CMakeLists.txt +++ b/tket/proptests/CMakeLists.txt @@ -49,9 +49,11 @@ target_link_libraries(proptest PRIVATE tket-Ops tket-OpType tket-Predicates - tket-Routing + tket-TokenSwapping + tket-Mapping tket-Simulation tket-Transformations tket-Utils) -target_link_libraries(proptest PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(proptest PRIVATE + ${CONAN_LIBS_FMT} ${CONAN_LIBS_SPDLOG} ${CONAN_LIBS_SYMENGINE}) diff --git a/tket/proptests/proptest.cpp b/tket/proptests/proptest.cpp index 79a8052198..dd254747cb 100644 --- a/tket/proptests/proptest.cpp +++ b/tket/proptests/proptest.cpp @@ -335,7 +335,7 @@ bool check_mapping() { // All gates must act on 1 or 2 qubits. 
PredicatePtr pp3 = std::make_shared(); if (!pp3->verify(c)) return; - PassPtr pass = gen_default_mapping_pass(a); + PassPtr pass = gen_default_mapping_pass(a, true); CompilationUnit cu(c); bool applied = pass->apply(cu); const Circuit &c1 = cu.get_circ_ref(); diff --git a/tket/src/ArchAwareSynth/CMakeLists.txt b/tket/src/ArchAwareSynth/CMakeLists.txt index 211d458a3e..0556a9c0a0 100644 --- a/tket/src/ArchAwareSynth/CMakeLists.txt +++ b/tket/src/ArchAwareSynth/CMakeLists.txt @@ -31,10 +31,10 @@ list(APPEND DEPS_${COMP} Converters Gate Graphs + PauliGraph + Placement Ops OpType - PauliGraph - Routing Utils) foreach(DEP ${DEPS_${COMP}}) diff --git a/tket/src/ArchAwareSynth/include/ArchAwareSynth/Path.hpp b/tket/src/ArchAwareSynth/include/ArchAwareSynth/Path.hpp index c796e6a5b6..9b54c81ff5 100644 --- a/tket/src/ArchAwareSynth/include/ArchAwareSynth/Path.hpp +++ b/tket/src/ArchAwareSynth/include/ArchAwareSynth/Path.hpp @@ -14,7 +14,7 @@ #pragma once #include "Architecture/Architecture.hpp" -#include "Routing/Placement.hpp" +#include "Placement/Placement.hpp" #include "Utils/MatrixAnalysis.hpp" #include "Utils/UnitID.hpp" namespace tket { diff --git a/tket/src/Architecture/Architecture.cpp b/tket/src/Architecture/Architecture.cpp index f322ba5d27..fc577e5f5d 100644 --- a/tket/src/Architecture/Architecture.cpp +++ b/tket/src/Architecture/Architecture.cpp @@ -24,8 +24,22 @@ namespace tket { +bool Architecture::valid_operation(const std::vector& uids) const { + for (Node n : uids) { + if (!this->node_exists(Node(n))) return false; + } + if (uids.size() == 1) { + return true; + } else if (uids.size() == 2) { + if (this->bidirectional_edge_exists(uids[0], uids[1])) { + return true; + } + } + return false; +} + Architecture Architecture::create_subarch( - const std::vector& subarc_nodes) { + const std::vector& subarc_nodes) const { Architecture subarc(subarc_nodes); for (auto [u1, u2] : get_all_edges_vec()) { if (subarc.node_exists(u1) && subarc.node_exists(u2)) { @@ -88,8 +102,7 @@ std::set Architecture::get_articulation_points() const { } static bool lexicographical_comparison( - const std::vector& dist1, - const std::vector& dist2) { + const std::vector& dist1, const std::vector& dist2) { return std::lexicographical_compare( dist1.begin(), dist1.end(), dist2.begin(), dist2.end()); } @@ -108,7 +121,7 @@ std::optional Architecture::find_worst_node( return std::nullopt; } - std::vector worst_distances, temp_distances; + std::vector worst_distances, temp_distances; Node worst_node = *bad_nodes.begin(); worst_distances = get_distances(worst_node); for (Node temp_node : bad_nodes) { @@ -120,9 +133,9 @@ std::optional Architecture::find_worst_node( worst_node = temp_node; worst_distances = temp_distances; } else if (distance_comp == -1) { - std::vector temp_distances_full = + std::vector temp_distances_full = original_arch.get_distances(temp_node); - std::vector worst_distances_full = + std::vector worst_distances_full = original_arch.get_distances(worst_node); if (lexicographical_comparison( temp_distances_full, worst_distances_full)) { diff --git a/tket/src/Architecture/ArchitectureMapping.cpp b/tket/src/Architecture/ArchitectureMapping.cpp new file mode 100644 index 0000000000..3e072d7f4e --- /dev/null +++ b/tket/src/Architecture/ArchitectureMapping.cpp @@ -0,0 +1,133 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ArchitectureMapping.hpp" + +#include +#include + +#include "Utils/Assert.hpp" + +namespace tket { + +ArchitectureMapping::ArchitectureMapping(const Architecture& arch) + : m_arch(arch) { + const auto uids = arch.nodes(); + m_vertex_to_node_mapping.reserve(uids.size()); + for (const UnitID& uid : uids) { + m_vertex_to_node_mapping.emplace_back(Node(uid)); + } + + for (size_t ii = 0; ii < m_vertex_to_node_mapping.size(); ++ii) { + const auto& node = m_vertex_to_node_mapping[ii]; + { + const auto citer = m_node_to_vertex_mapping.find(node); + // GCOVR_EXCL_START + TKET_ASSERT( + citer == m_node_to_vertex_mapping.cend() || + AssertMessage() << "Duplicate node " << node.repr() << " at vertices " + << citer->second << ", " << ii); + // GCOVR_EXCL_STOP + } + m_node_to_vertex_mapping[node] = ii; + } +} + +ArchitectureMapping::ArchitectureMapping( + const Architecture& arch, + const std::vector>& edges) + : m_arch(arch) { + auto& node_to_vertex_mapping = m_node_to_vertex_mapping; + auto& vertex_to_node_mapping = m_vertex_to_node_mapping; + + const auto add_node = [&node_to_vertex_mapping, + &vertex_to_node_mapping](unsigned nn) { + const Node node(nn); + if (node_to_vertex_mapping.count(node) == 0) { + node_to_vertex_mapping[node] = vertex_to_node_mapping.size(); + vertex_to_node_mapping.push_back(node); + } + }; + + // The nodes are labelled 0,1,2,... in order of appearance. + // Nothing special about this ordering, just for backwards compatibility. + for (const auto& entry : edges) { + add_node(entry.first); + add_node(entry.second); + } + + // Check that the nodes agree with the architecture object. 
+ const auto uids = arch.nodes(); + // GCOVR_EXCL_START + TKET_ASSERT( + uids.size() == m_vertex_to_node_mapping.size() || + AssertMessage() << "passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has " + << uids.size() << " vertices"); + // GCOVR_EXCL_STOP + + for (const UnitID& uid : uids) { + const Node node(uid); + // GCOVR_EXCL_START + TKET_ASSERT( + m_node_to_vertex_mapping.count(node) != 0 || + AssertMessage() + << "passed in " << edges.size() << " edges, giving " + << m_vertex_to_node_mapping.size() + << " vertices; but the architecture object has an unknown node " + << node.repr()); + // GCOVR_EXCL_STOP + } +} + +size_t ArchitectureMapping::number_of_vertices() const { + return m_vertex_to_node_mapping.size(); +} + +const Node& ArchitectureMapping::get_node(size_t vertex) const { + const auto num_vertices = number_of_vertices(); + // GCOVR_EXCL_START + TKET_ASSERT( + vertex < num_vertices || AssertMessage() << "invalid vertex " << vertex + << " (architecture only has " + << num_vertices << " vertices)"); + // GCOVR_EXCL_STOP + + return m_vertex_to_node_mapping[vertex]; +} + +size_t ArchitectureMapping::get_vertex(const Node& node) const { + const auto citer = m_node_to_vertex_mapping.find(node); + // GCOVR_EXCL_START + TKET_ASSERT( + citer != m_node_to_vertex_mapping.cend() || + AssertMessage() << "node " << node.repr() << " has no vertex number"); + // GCOVR_EXCL_STOP + return citer->second; +} + +const Architecture& ArchitectureMapping::get_architecture() const { + return m_arch; +} + +std::vector ArchitectureMapping::get_edges() const { + std::vector edges; + for (auto [node1, node2] : m_arch.get_all_edges_vec()) { + edges.emplace_back(get_swap(get_vertex(node1), get_vertex(node2))); + } + return edges; +} + +} // namespace tket diff --git a/tket/src/Architecture/BestTsaWithArch.cpp b/tket/src/Architecture/BestTsaWithArch.cpp new file mode 100644 index 0000000000..9e2e28fb0b --- /dev/null +++ b/tket/src/Architecture/BestTsaWithArch.cpp @@ -0,0 +1,78 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "BestTsaWithArch.hpp" + +#include "DistancesFromArchitecture.hpp" +#include "NeighboursFromArchitecture.hpp" +#include "TokenSwapping/BestFullTsa.hpp" +#include "Utils/Assert.hpp" +#include "Utils/RNG.hpp" + +namespace tket { + +using namespace tsa_internal; + +void BestTsaWithArch::append_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + const ArchitectureMapping& arch_mapping) { + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RNG rng; + RiverFlowPathFinder path_finder(distances, neighbours, rng); + BestFullTsa().append_partial_solution( + swaps, vertex_mapping, distances, neighbours, path_finder); +} + +std::vector> BestTsaWithArch::get_swaps( + const Architecture& architecture, const NodeMapping& node_mapping) { + std::vector> swaps; + // Before all the conversion and object construction, + // doesn't take long to check if it's actually trivial + bool trivial = true; + for (const auto& entry : node_mapping) { + if (entry.first != entry.second) { + trivial = false; + break; + } + } + if (trivial) { + return swaps; + } + // Now convert the Nodes into raw vertices for use in TSA objects. + const ArchitectureMapping arch_mapping(architecture); + VertexMapping vertex_mapping; + for (const auto& node_entry : node_mapping) { + vertex_mapping[arch_mapping.get_vertex(node_entry.first)] = + arch_mapping.get_vertex(node_entry.second); + } + TKET_ASSERT(vertex_mapping.size() == node_mapping.size()); + check_mapping(vertex_mapping); + + SwapList raw_swap_list; + BestTsaWithArch::append_solution(raw_swap_list, vertex_mapping, arch_mapping); + + // Finally, convert the raw swaps back to nodes. + swaps.reserve(raw_swap_list.size()); + for (auto id_opt = raw_swap_list.front_id(); id_opt; + id_opt = raw_swap_list.next(id_opt.value())) { + const auto& raw_swap = raw_swap_list.at(id_opt.value()); + swaps.emplace_back(std::make_pair( + arch_mapping.get_node(raw_swap.first), + arch_mapping.get_node(raw_swap.second))); + } + return swaps; +} + +} // namespace tket diff --git a/tket/src/Architecture/CMakeLists.txt b/tket/src/Architecture/CMakeLists.txt index 30f3b33135..ff18aaaa3d 100644 --- a/tket/src/Architecture/CMakeLists.txt +++ b/tket/src/Architecture/CMakeLists.txt @@ -20,12 +20,18 @@ endif() add_library(tket-${COMP} Architecture.cpp - ArchitectureGraphClasses.cpp) + ArchitectureGraphClasses.cpp + ArchitectureMapping.cpp + BestTsaWithArch.cpp + DistancesFromArchitecture.cpp + NeighboursFromArchitecture.cpp) list(APPEND DEPS_${COMP} Graphs + TokenSwapping Utils) + foreach(DEP ${DEPS_${COMP}}) target_include_directories( tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) diff --git a/tket/src/Architecture/DistancesFromArchitecture.cpp b/tket/src/Architecture/DistancesFromArchitecture.cpp new file mode 100644 index 0000000000..fb42a2aa21 --- /dev/null +++ b/tket/src/Architecture/DistancesFromArchitecture.cpp @@ -0,0 +1,91 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "DistancesFromArchitecture.hpp" + +#include +#include + +namespace tket { + +DistancesFromArchitecture::DistancesFromArchitecture( + const ArchitectureMapping& arch_mapping) + : m_arch_mapping(arch_mapping) {} + +void DistancesFromArchitecture::register_shortest_path( + const std::vector& path) { + // To avoid quadratic growth for really long paths, + // just do various slices. + if (path.size() <= 5) { + register_shortest_path_with_limits(path, 0, path.size()); + return; + } + const size_t middle = path.size() / 2; + if (path.size() <= 10) { + register_shortest_path_with_limits(path, 0, middle); + register_shortest_path_with_limits(path, middle, path.size()); + register_edge(path[middle - 1], path[middle]); + return; + } + register_shortest_path_with_limits(path, 0, 5); + register_shortest_path_with_limits(path, path.size() - 5, path.size()); + if (path.size() >= 15) { + register_shortest_path_with_limits(path, middle - 2, middle + 3); + } +} + +void DistancesFromArchitecture::register_shortest_path_with_limits( + const std::vector& path, size_t begin, size_t end) { + for (size_t ii = begin; ii < end; ++ii) { + for (size_t jj = ii + 1; jj < end; ++jj) { + m_cached_distances[get_swap(path[ii], path[jj])] = jj - ii; + } + } +} + +void DistancesFromArchitecture::register_edge(size_t vertex1, size_t vertex2) { + m_cached_distances[get_swap(vertex1, vertex2)] = 1; +} + +size_t DistancesFromArchitecture::operator()(size_t vertex1, size_t vertex2) { + if (vertex1 == vertex2) { + return 0; + } + // Automatically set to zero if it doesn't exist yet. + auto& distance_entry = m_cached_distances[get_swap(vertex1, vertex2)]; + if (distance_entry == 0) { + const auto& arch = m_arch_mapping.get_architecture(); + distance_entry = arch.get_distance( + m_arch_mapping.get_node(vertex1), m_arch_mapping.get_node(vertex2)); + + // This message should no longer be triggered for disconnected + // architectures, since get_distance now should throw if v1, v2 are in + // different connected components. However, leave the check in, in case some + // other bizarre error causes distance zero to be returned. + // GCOVR_EXCL_START + TKET_ASSERT( + distance_entry > 0 || + AssertMessage() << "DistancesFromArchitecture: architecture has " + << arch.n_nodes() << " vertices, " + << arch.n_connections() << " edges; returned diameter " + << arch.get_diameter() << " and d(" << vertex1 << "," + << vertex2 + << ")=0. " + "Is the graph connected?"); + // GCOVR_EXCL_STOP + } + return distance_entry; +} + +} // namespace tket diff --git a/tket/src/Architecture/NeighboursFromArchitecture.cpp b/tket/src/Architecture/NeighboursFromArchitecture.cpp new file mode 100644 index 0000000000..3210e47944 --- /dev/null +++ b/tket/src/Architecture/NeighboursFromArchitecture.cpp @@ -0,0 +1,68 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
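The lazy caching in DistancesFromArchitecture above is easiest to see from the caller's side: the first query for a vertex pair falls through to Architecture::get_distance and is cached, later queries for the same pair are a single map lookup, and a caller that already knows a shortest path can pre-seed the cache. A small sketch, assuming a four-node line architecture and the headers added in this diff (the names and the constructor overload are illustrative):

// Illustrative sketch, not part of the PR: driving DistancesFromArchitecture.
#include <cstddef>

#include "Architecture/ArchitectureMapping.hpp"
#include "Architecture/DistancesFromArchitecture.hpp"

using namespace tket;

int main() {
  // A four-node line 0-1-2-3 (assumes the vector-of-Node-pairs constructor).
  const Architecture arch(
      {{Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}});
  const ArchitectureMapping mapping(arch);
  DistancesFromArchitecture distances(mapping);

  // First query for this pair: computed via Architecture::get_distance, then
  // cached. Repeated queries for the same pair cost only a map lookup.
  const size_t d = distances(
      mapping.get_vertex(Node(0)), mapping.get_vertex(Node(3)));

  // A caller that has found a genuine shortest path can pre-seed the cache;
  // as the header warns, it must really be a shortest path.
  distances.register_shortest_path(
      {mapping.get_vertex(Node(0)), mapping.get_vertex(Node(1)),
       mapping.get_vertex(Node(2)), mapping.get_vertex(Node(3))});

  return d == 3 ? 0 : 1;  // distance along the line is 3
}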
+ +#include "NeighboursFromArchitecture.hpp" + +#include +#include +#include + +namespace tket { + +NeighboursFromArchitecture::NeighboursFromArchitecture( + const ArchitectureMapping& arch_mapping) + : m_arch_mapping(arch_mapping) {} + +const std::vector& NeighboursFromArchitecture::operator()( + size_t vertex) { + const auto num_vertices = m_arch_mapping.number_of_vertices(); + // GCOVR_EXCL_START + TKET_ASSERT( + vertex < num_vertices || + AssertMessage() << "get_neighbours: invalid vertex " << vertex + << " (only have " << num_vertices << " vertices)"); + // GCOVR_EXCL_STOP + auto& neighbours = m_cached_neighbours[vertex]; + if (!neighbours.empty()) { + // Already cached. + return neighbours; + } + + // OK, if a vertex is isolated (has no neighbours) then this is wasteful; + // however this case should almost never occur in practice. + + const auto& source_node = m_arch_mapping.get_node(vertex); + const auto neighbour_nodes = + m_arch_mapping.get_architecture().get_neighbour_nodes(source_node); + + neighbours.reserve(neighbour_nodes.size()); + + for (const Node& node : neighbour_nodes) { + const auto neighbour_vertex = m_arch_mapping.get_vertex(node); + // GCOVR_EXCL_START + TKET_ASSERT( + neighbour_vertex != vertex || + AssertMessage() + << "get_neighbours: vertex " << vertex << " for node " + << node.repr() << " has " << neighbour_nodes.size() + << " neighbours, and lists itself as a neighbour (loops not " + "allowed)"); + // GCOVR_EXCL_STOP + neighbours.push_back(neighbour_vertex); + } + std::sort(neighbours.begin(), neighbours.end()); + return neighbours; +} + +} // namespace tket diff --git a/tket/src/Architecture/include/Architecture/Architecture.hpp b/tket/src/Architecture/include/Architecture/Architecture.hpp index dda4d10d74..8015634e50 100644 --- a/tket/src/Architecture/include/Architecture/Architecture.hpp +++ b/tket/src/Architecture/include/Architecture/Architecture.hpp @@ -29,6 +29,7 @@ #include "Utils/MatrixAnalysis.hpp" #include "Utils/TketLog.hpp" #include "Utils/UnitID.hpp" + namespace tket { extern template class graphs::DirectedGraphBase; @@ -101,10 +102,16 @@ class Architecture : public ArchitectureBase> { */ node_set_t get_articulation_points(const Architecture &subarc) const; + /** + * Returns true if the given operation acting on the given nodes + * can be executed on the Architecture connectivity graph. + */ + bool valid_operation(const std::vector &uids) const; + /** * Sub-architecture generated by a subset of nodes. */ - Architecture create_subarch(const std::vector &nodes); + Architecture create_subarch(const std::vector &nodes) const; /** * Vectors of nodes corresponding to lines of given lengths @@ -199,6 +206,8 @@ class SquareGrid : public Architecture { unsigned layers; }; +typedef std::shared_ptr ArchitecturePtr; + int tri_lexicographical_comparison( const dist_vec &dist1, const dist_vec &dist2); diff --git a/tket/src/Architecture/include/Architecture/ArchitectureMapping.hpp b/tket/src/Architecture/include/Architecture/ArchitectureMapping.hpp new file mode 100644 index 0000000000..63bcffdf1b --- /dev/null +++ b/tket/src/Architecture/include/Architecture/ArchitectureMapping.hpp @@ -0,0 +1,108 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Architecture/Architecture.hpp" +#include "TokenSwapping/SwapFunctions.hpp" + +namespace tket { + +/** Intended for use with TokenSwapping. + * For mapping between nodes in an architecture and size_t vertex numbers. + * The vertex numbers are merely the indices of each Node + * within the vector returned by the get_all_nodes() function. + * + * For now, we don't want to use Node objects as (1) this would make + * TokenSwapping dependent on other parts of Tket and hence less modular, + * (2) it would probably slow things down significantly because Nodes + * contain extra data, like vectors and strings, which are relatively + * expensive to copy; vertices get copied and moved around many times + * by any TSA. + * + * TODO: it would be better to use a Vertex wrapper class + * instead of raw size_t. (Also, might change to unsigned instead of size_t). + */ +class ArchitectureMapping { + public: + /** The object must remain valid and unchanged + * throughout the life of this object. + * @param arch The finished Architecture object, must remain valid + * for the lifetime of this object. + */ + explicit ArchitectureMapping(const Architecture& arch); + + /** If the architecture object was initialised with explicit edges, + * use these edges (rather than the Architecture nodes() function) + * to create the Node <-> size_t mapping, in a fixed way not dependent + * on Architecture (the reason being that Architecture does not guarantee + * the mapping, but if we change the labels then we change to an isomorphic + * but different token swapping problem, which messes up testing. + * In practice every implementation of token swapping, except for the ultimate + * probably exponential-time optimal algorithm, is going to depend + * on the labels. Even if we had a fast graph isomorphism routine, the labels + * would still not be uniquely determined, as they could be permuted). + * @param arch The finished Architecture object, must remain valid + * for the lifetime of this object. + * @param edges Edges originally used to construct the Architecture object. + * These will uniquely determine the internal Node <-> size_t mapping. + */ + ArchitectureMapping( + const Architecture& arch, + const std::vector>& edges); + + /** Convenient reference to the Architecture object we used + * to construct this ArchitectureMapping. + */ + const Architecture& get_architecture() const; + + /** The number of vertices in the Architecture. + * @return The number of vertices + */ + size_t number_of_vertices() const; + + /** Get the newly created vertex assigned to the node. + * Throws if the node is invalid. + * @param node The node within the original Architecture object + * @return The newly created vertex representing this node + */ + size_t get_vertex(const Node& node) const; + + /** Reverse of "get_vertex", throws if the vertex is invalid. + * @param vertex The vertex created by this ArchitectureMapping object. + * @return The node corresponding to this vertex. 
+ */ + const Node& get_node(size_t vertex) const; + + /** Get the edges using the vertices created by this ArchitectureMapping + * object. The vertex numbers, of course, do not necessarily match with + * the Node uids of the underlying architecture object + * (that's why we have a mapping). + * @return The vector of edges in the architecture, using the new + * vertex numbers. + */ + std::vector get_edges() const; + + private: + /// Store a reference to the Architecture passed into the constructor. + const Architecture& m_arch; + + /// Element i is simply the node corresponding to vertex i. + node_vector_t m_vertex_to_node_mapping; + + /// Reverse of m_vertex_to_node_mapping; look up the index of a node. + std::map m_node_to_vertex_mapping; +}; + +} // namespace tket diff --git a/tket/src/Architecture/include/Architecture/BestTsaWithArch.hpp b/tket/src/Architecture/include/Architecture/BestTsaWithArch.hpp new file mode 100644 index 0000000000..262242b4d6 --- /dev/null +++ b/tket/src/Architecture/include/Architecture/BestTsaWithArch.hpp @@ -0,0 +1,64 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "ArchitectureMapping.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { + +/** A simple wrapper around BestFullTsa from TokenSwapping, + * using Architecture objects directly to find distances and neighbours. + */ +struct BestTsaWithArch { + /** Given the desired vertex mapping, a list + * of swaps (which may or may not be empty), and information about + * the architecture (the underlying graph), append extra swaps to it + * to produce the desired mapping. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping. Will be updated with + * the new added swaps. + * @param arch_mapping An ArchitectureMapping object, which knows the graph, + * and how to do Node <-> vertex size_t conversions. + */ + static void append_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + const ArchitectureMapping& arch_mapping); + + /** This specifies desired source->target vertex mappings. + * Any nodes not occurring as a key might be moved by the algorithm. + */ + typedef std::map NodeMapping; + + /** Given an architecture and desired source->target node mapping, + * compute a sequence of swaps (attempts to be as short as possible) + * which will perform that mapping. + * Note that it may use ALL the nodes in the architecture, + * not just the ones occurring in the node_mapping. + * If you wish certain nodes to be fixed, specify them in the mapping + * (with equal source and target). + * (However, note that they might STILL be moved, as long as by the end + * they are back at the start. If you really don't to involve a particular + * node, you must remove it completely from the architecture). + * KNOWN BUG: it may give an error with disconnected architectures. + * @param architecture The raw object containing the graph. 
+ * @param node_mapping The desired source->target node mapping. + * @return The required list of node pairs to swap. + */ + static std::vector> get_swaps( + const Architecture& architecture, const NodeMapping& node_mapping); +}; + +} // namespace tket diff --git a/tket/src/Architecture/include/Architecture/DistancesFromArchitecture.hpp b/tket/src/Architecture/include/Architecture/DistancesFromArchitecture.hpp new file mode 100644 index 0000000000..8f9f8ac2b0 --- /dev/null +++ b/tket/src/Architecture/include/Architecture/DistancesFromArchitecture.hpp @@ -0,0 +1,86 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "ArchitectureMapping.hpp" +#include "TokenSwapping/DistancesInterface.hpp" +#include "TokenSwapping/SwapFunctions.hpp" + +namespace tket { + +/** Directly get distances from an architecture object, + * but evaluated lazily. + */ +class DistancesFromArchitecture : public DistancesInterface { + public: + /** The ArchitectureMapping object already handles the Node <-> vertex size_t + * conversion. + * @param arch_mapping Object containing a reference to an Architecture, + * which has decided upon Node <-> vertex size_t conversions. + */ + explicit DistancesFromArchitecture(const ArchitectureMapping& arch_mapping); + + /** Get the distance from v1 to v2. Throws if distinct vertices return + * distance 0, which probably means a disconnected graph. + * @param vertex1 First vertex + * @param vertex2 Second vertex + * @return distance from v1 to v2 within the Architecture graph, throwing if + * they are disconnected (so the distance is +infinity). + */ + virtual size_t operator()(size_t vertex1, size_t vertex2) override; + + /** May save computation time later; by some method, the caller + * has determined a path from v1 to v2, and hence all along the path + * we know the distance between any two points. + * However, avoids quadratic time blowup by discarding some information + * for long paths. + * @param path A sequence [v0,v1, v2, ..., vn] of vertices, KNOWN to be a + * shortest path from v0 to vn. The caller must not call this without being + * SURE that it really is a shortest path, or incorrect results may occur. + */ + virtual void register_shortest_path(const std::vector& path) override; + + /** The caller has determined that v1, v2 are adjacent, and therefore + * the distance from v1 to v2 equals one. Store this. + * @param vertex1 First vertex + * @param vertex2 Second vertex + */ + virtual void register_edge(size_t vertex1, size_t vertex2) override; + + private: + /** Reference to the original object passed into the constructor; + * the caller must ensure that it remains valid and unchanged. + */ + const ArchitectureMapping& m_arch_mapping; + + /** The key is the vertex pair (v1, v2), but always sorted with v1 m_cached_distances; + + /** The main register_shortest_path wraps around this; we want to avoid + * quadratic timings growth by cutting off long paths. 
+ * This stores the quadratic number of distances between all vertex pairs + * within the given subpath. + * @param path A sequence [v0,v1, v2, ..., vn] of vertices, + * KNOWN to be a shortest path from v0 to vn. + * @param begin The first index in path to use. + * @param end Like end(), an index one past the last index in path to use. + */ + void register_shortest_path_with_limits( + const std::vector& path, size_t begin, size_t end); +}; + +} // namespace tket diff --git a/tket/src/Architecture/include/Architecture/NeighboursFromArchitecture.hpp b/tket/src/Architecture/include/Architecture/NeighboursFromArchitecture.hpp new file mode 100644 index 0000000000..e4a833b591 --- /dev/null +++ b/tket/src/Architecture/include/Architecture/NeighboursFromArchitecture.hpp @@ -0,0 +1,49 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "ArchitectureMapping.hpp" +#include "TokenSwapping/NeighboursInterface.hpp" + +namespace tket { + +/** Stores and returns upon request the adjacent vertices to a given vertex + * on a graph, using an underlying Architecture object. + */ +class NeighboursFromArchitecture : public NeighboursInterface { + public: + /** The objects must remain valid AND unchanged + * for the lifetime of this object. + * @param arch_mapping An object which contains a reference to an + * Architecture object internally, and handles Node -> vertex size_t + * conversions. + */ + explicit NeighboursFromArchitecture(const ArchitectureMapping& arch_mapping); + + /** For extra convenience, the list of neighbours is always sorted + * in increasing order (so you can do binary search, etc.) + * @param vertex A vertex. + * @return A sorted list of all adjacent vertices, stored internally. + */ + virtual const std::vector& operator()(size_t vertex) override; + + private: + const ArchitectureMapping& m_arch_mapping; + + /** The key is the vertex, the value is the list of neighbours. 
*/ + std::map> m_cached_neighbours; +}; + +} // namespace tket diff --git a/tket/src/CMakeLists.txt b/tket/src/CMakeLists.txt index 20050dc441..7dbc490a37 100644 --- a/tket/src/CMakeLists.txt +++ b/tket/src/CMakeLists.txt @@ -57,8 +57,11 @@ ELSEIF(APPLE) set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON) ENDIF() +# if you add new modules here make sure that it is added at the right position +# this list corresponds to a topological sorting of the dependency graph of the different modules list(APPEND TKET_COMPS Utils + TokenSwapping ZX OpType Clifford @@ -73,10 +76,11 @@ list(APPEND TKET_COMPS Program Characterisation Converters - Routing - MeasurementSetup - Transformations + Placement ArchAwareSynth + Mapping + MeasurementSetup + Transformations Predicates) foreach(COMP ${TKET_COMPS}) diff --git a/tket/src/Circuit/CircPool.cpp b/tket/src/Circuit/CircPool.cpp index e6b03fc771..93b69699f5 100644 --- a/tket/src/Circuit/CircPool.cpp +++ b/tket/src/Circuit/CircPool.cpp @@ -786,6 +786,130 @@ Circuit NPhasedX_using_CX( return c; } +static unsigned int_half(const Expr &angle) { + // Assume angle is an even integer + double eval = eval_expr(angle).value(); + return lround(eval / 2); +} + +Circuit tk1_to_rzsx(const Expr &alpha, const Expr &beta, const Expr &gamma) { + Circuit c(1); + Expr correction_phase = 0; + if (equiv_0(beta)) { + // b = 2k, if k is odd, then Rx(b) = -I + c.add_op(OpType::Rz, alpha + gamma, {0}); + correction_phase = int_half(beta); + } else if (equiv_0(beta + 1)) { + // Use Rx(2k-1) = i(-1)^{k}SxSx + correction_phase = -0.5 + int_half(beta - 1); + if (equiv_0(alpha - gamma)) { + // a - c = 2m + // overall operation is (-1)^{m}Rx(2k -1) + c.add_op(OpType::SX, {0}); + c.add_op(OpType::SX, {0}); + correction_phase += int_half(alpha - gamma); + } else { + c.add_op(OpType::Rz, gamma, {0}); + c.add_op(OpType::SX, {0}); + c.add_op(OpType::SX, {0}); + c.add_op(OpType::Rz, alpha, {0}); + } + } else if (equiv_0(beta - 0.5) && equiv_0(alpha) && equiv_0(gamma)) { + // a = 2k, b = 2m+0.5, c = 2n + // Rz(2k)Rx(2m + 0.5)Rz(2n) = (-1)^{k+m+n}e^{-i \pi /4} SX + c.add_op(OpType::SX, {0}); + correction_phase = + int_half(beta - 0.5) + int_half(alpha) + int_half(gamma) - 0.25; + } else if (equiv_0(alpha - 0.5) && equiv_0(gamma - 0.5)) { + // Rz(2k + 0.5)Rx(b)Rz(2m + 0.5) = -i(-1)^{k+m}SX.Rz(1-b).SX + c.add_op(OpType::SX, {0}); + c.add_op(OpType::Rz, 1 - beta, {0}); + c.add_op(OpType::SX, {0}); + correction_phase = int_half(alpha - 0.5) + int_half(gamma - 0.5) - 0.5; + } else { + c.add_op(OpType::Rz, gamma + 0.5, {0}); + c.add_op(OpType::SX, {0}); + c.add_op(OpType::Rz, beta - 1, {0}); + c.add_op(OpType::SX, {0}); + c.add_op(OpType::Rz, alpha + 0.5, {0}); + correction_phase = -0.5; + } + c.add_phase(correction_phase); + return c; +} + +Circuit tk1_to_rzh(const Expr &alpha, const Expr &beta, const Expr &gamma) { + Circuit c(1); + std::optional cliff = equiv_Clifford(beta, 4); + if (cliff) { + switch (*cliff % 4) { + case 0: { + c.add_op(OpType::Rz, gamma + alpha, {0}); + break; + } + case 1: { + c.add_op(OpType::Rz, gamma - 0.5, {0}); + c.add_op(OpType::H, {0}); + c.add_op(OpType::Rz, alpha - 0.5, {0}); + c.add_phase(-0.5); + break; + } + case 2: { + c.add_op(OpType::Rz, gamma - alpha, {0}); + c.add_op(OpType::H, {0}); + c.add_op(OpType::Rz, 1., {0}); + c.add_op(OpType::H, {0}); + break; + } + case 3: { + c.add_op(OpType::Rz, gamma + 0.5, {0}); + c.add_op(OpType::H, {0}); + c.add_op(OpType::Rz, alpha + 0.5, {0}); + c.add_phase(-0.5); + break; + } + } + if (cliff >= 4u) c.add_phase(1.); + } else { + 
c.add_op(OpType::Rz, gamma, {0}); + c.add_op(OpType::H, {0}); + c.add_op(OpType::Rz, beta, {0}); + c.add_op(OpType::H, {0}); + c.add_op(OpType::Rz, alpha, {0}); + } + return c; +} + +Circuit tk1_to_tk1(const Expr &alpha, const Expr &beta, const Expr &gamma) { + Circuit c(1); + c.add_op(OpType::TK1, {alpha, beta, gamma}, {0}); + return c; +} + +Circuit tk1_to_rzrx(const Expr &alpha, const Expr &beta, const Expr &gamma) { + Circuit c(1); + c.add_op(OpType::Rz, gamma, {0}); + c.add_op(OpType::Rx, beta, {0}); + c.add_op(OpType::Rz, alpha, {0}); + return c; +} + +Circuit tk1_to_PhasedXRz( + const Expr &alpha, const Expr &beta, const Expr &gamma) { + Circuit c(1); + if (equiv_expr(beta, 1)) { + // Angles β ∈ {π, 3π} + c.add_op(OpType::PhasedX, {beta, (alpha - gamma) / 2.}, {0}); + } else if (equiv_expr(beta, 0)) { + // Angle β ∈ {0, 2π} + c.add_op(OpType::Rz, alpha + beta + gamma, {0}); + } else { + c.add_op(OpType::Rz, alpha + gamma, {0}); + c.add_op(OpType::PhasedX, {beta, alpha}, {0}); + } + return c; +} + } // namespace CircPool } // namespace tket diff --git a/tket/src/Circuit/basic_circ_manip.cpp b/tket/src/Circuit/basic_circ_manip.cpp index 6d207711c8..3fc0376a32 100644 --- a/tket/src/Circuit/basic_circ_manip.cpp +++ b/tket/src/Circuit/basic_circ_manip.cpp @@ -50,12 +50,6 @@ void Circuit::remove_blank_wires() { for (const UnitID& u : unused_units) { boundary.get().erase(u); } - if (unit_bimaps_.initial && unit_bimaps_.final) { - for (const UnitID& u : unused_units) { - unit_bimaps_.initial->right.erase(u); - unit_bimaps_.final->right.erase(u); - } - } remove_vertices(bin, GraphRewiring::No, VertexDeletion::Yes); } @@ -315,7 +309,7 @@ void Circuit::remove_edge(const Edge& edge) { boost::remove_edge(edge, this->dag); } -void Circuit::flatten_registers() { +unit_map_t Circuit::flatten_registers() { unsigned q_index = 0; unsigned c_index = 0; boundary_t new_map; @@ -333,8 +327,7 @@ void Circuit::flatten_registers() { new_map.insert(new_el); } boundary = new_map; - update_initial_map(qmap); - update_final_map(qmap); + return qmap; } // this automatically updates the circuit boundaries @@ -384,21 +377,6 @@ void Circuit::add_qubit(const Qubit& id, bool reject_dups) { Vertex out = add_vertex(OpType::Output); add_edge({in, 0}, {out, 0}, EdgeType::Quantum); boundary.insert({id, in, out}); - - unit_bimap_t* ubmap_initial = unit_bimaps_.initial; - unit_bimap_t* ubmap_final = unit_bimaps_.final; - - if (ubmap_initial && ubmap_final) { - if (ubmap_initial->right.find(id) != ubmap_initial->right.end() || - ubmap_initial->left.find(id) != ubmap_initial->left.end() || - ubmap_final->right.find(id) != ubmap_final->right.end() || - ubmap_final->left.find(id) != ubmap_final->left.end()) { - throw CircuitInvalidity( - "A unit with ID \"" + id.repr() + "\" already exists"); - } - ubmap_initial->left.insert({id, id}); - ubmap_final->left.insert({id, id}); - } } void Circuit::add_bit(const Bit& id, bool reject_dups) { diff --git a/tket/src/Circuit/include/Circuit/CircPool.hpp b/tket/src/Circuit/include/Circuit/CircPool.hpp index f07fe6620f..5d874fb130 100644 --- a/tket/src/Circuit/include/Circuit/CircPool.hpp +++ b/tket/src/Circuit/include/Circuit/CircPool.hpp @@ -225,6 +225,18 @@ Circuit PhasedISWAP_using_CX(Expr p, Expr t); /** Unwrap NPhasedX, into number_of_qubits PhasedX gates */ Circuit NPhasedX_using_CX(unsigned int number_of_qubits, Expr alpha, Expr beta); +// converts a TK1 gate to a PhasedXRz gate +Circuit tk1_to_PhasedXRz( + const Expr &alpha, const Expr &beta, const Expr &gamma); + +Circuit 
tk1_to_rzrx(const Expr &alpha, const Expr &beta, const Expr &gamma); + +Circuit tk1_to_rzh(const Expr &alpha, const Expr &beta, const Expr &gamma); + +Circuit tk1_to_rzsx(const Expr &alpha, const Expr &beta, const Expr &gamma); + +Circuit tk1_to_tk1(const Expr &alpha, const Expr &beta, const Expr &gamma); + } // namespace CircPool } // namespace tket diff --git a/tket/src/Circuit/include/Circuit/Circuit.hpp b/tket/src/Circuit/include/Circuit/Circuit.hpp index 32139874c3..be5ac25918 100644 --- a/tket/src/Circuit/include/Circuit/Circuit.hpp +++ b/tket/src/Circuit/include/Circuit/Circuit.hpp @@ -365,7 +365,7 @@ class Circuit { /** * Construct an empty circuit. */ - Circuit() : unit_bimaps_{nullptr, nullptr}, phase(0) {} + Circuit() : phase(0) {} /** * Construct an empty named circuit. @@ -564,6 +564,16 @@ class Circuit { std::vector get_b_out_bundles( const Vertex &vert) const; // returned by port no. + /** + * All bundles of in Boolean edges, ordered by port number + * + * Each member of the list is a list of edges sharing the same port + * + * @param vert vertex + */ + std::vector get_b_in_bundles( + const Vertex &vert) const; // returned by port no. + /** * Total number of inward edges * @@ -636,6 +646,11 @@ class Circuit { const std::optional &get_opgroup_from_Vertex( const Vertex &vert) const; + /** + * Get the set of all opgroup names. + */ + const std::unordered_set get_opgroups() const; + // O(1) (lookup in hashtable) OpDesc get_OpDesc_from_Vertex(const Vertex &vert) const; OpType get_OpType_from_Vertex(const Vertex &vert) const; @@ -680,7 +695,12 @@ class Circuit { // O(1) bool detect_singleq_unitary_op(const Vertex &vert) const; - void flatten_registers(); + /** + * Convert all quantum and classical bits to use default registers. + * + * @return mapping from old to new unit IDs + */ + unit_map_t flatten_registers(); //_________________________________________________ @@ -946,18 +966,6 @@ class Circuit { template bool rename_units(const std::map &qm); - /** - * When either inputs or outputs are relabelled, this method can be called - * to update the parent CompilationUnit's initial/final map (if such a - * parent exists). - * - * @param qm partial relabelling map from current ids to new ids - */ - template - void update_initial_map(const std::map &qm); - template - void update_final_map(const std::map &qm); - /** Automatically rewire holes when removing vertices from the circuit? */ enum class GraphRewiring { Yes, No }; @@ -1066,6 +1074,11 @@ class Circuit { std::shared_ptr b_frontier, const std::function &skip_func) const; + // given current slice of quantum frontier, returns the next slice. + // ignore classical and boolean edges + CutFrontier next_q_cut( + std::shared_ptr u_frontier) const; + /** * Depth of circuit. * @@ -1382,6 +1395,13 @@ class Circuit { */ Circuit conditional_circuit(const bit_vector_t &bits, unsigned value) const; + /** + * Replaces one vertex by applying \ref Box::to_circuit + * + * @return whether the vertex holds a box or a conditional box + */ + bool substitute_box_vertex(Vertex &vert, VertexDeletion vertex_deletion); + /** * Replaces each \ref Box operation by applying \ref Box::to_circuit * @@ -1509,16 +1529,6 @@ class Circuit { DAG dag; /** Representation as directed graph */ boundary_t boundary; - /** - * Pointers to unit maps - * - * These are used by compilation units. 
- */ - struct { - unit_bimap_t *initial; - unit_bimap_t *final; - } unit_bimaps_; - private: std::optional name; /** optional string name descriptor for human identification*/ @@ -1576,63 +1586,9 @@ bool Circuit::rename_units(const std::map &qm) { "Unit already exists in circuit: " + pair.first.repr()); TKET_ASSERT(modified); } - update_initial_map(qm); - update_final_map(qm); return modified; } -template -void Circuit::update_initial_map(const std::map &qm) { - // Can only work for Unit classes - static_assert(std::is_base_of::value); - static_assert(std::is_base_of::value); - // Unit types must be related, so cannot rename e.g. Bits to Qubits - static_assert( - std::is_base_of::value || - std::is_base_of::value); - unit_bimap_t *ubmap = unit_bimaps_.initial; - if (ubmap) { - unit_map_t new_initial_map; - for (const std::pair &pair : qm) { - const auto &it = ubmap->right.find(pair.first); - if (it == ubmap->right.end()) { - continue; - } - new_initial_map.insert({it->second, pair.second}); - ubmap->right.erase(pair.first); - } - for (const std::pair &pair : new_initial_map) { - ubmap->left.insert(pair); - } - } -} - -template -void Circuit::update_final_map(const std::map &qm) { - // Can only work for Unit classes - static_assert(std::is_base_of::value); - static_assert(std::is_base_of::value); - // Unit types must be related, so cannot rename e.g. Bits to Qubits - static_assert( - std::is_base_of::value || - std::is_base_of::value); - unit_bimap_t *ubmap = unit_bimaps_.final; - if (ubmap) { - unit_map_t new_final_map; - for (const std::pair &pair : qm) { - const auto &it = ubmap->right.find(pair.first); - if (it == ubmap->right.end()) { - continue; - } - new_final_map.insert({it->second, pair.second}); - ubmap->right.erase(pair.first); - } - for (const std::pair &pair : new_final_map) { - ubmap->left.insert(pair); - } - } -} - template <> Vertex Circuit::add_op( const Op_ptr &op, const std::vector &args, diff --git a/tket/src/Circuit/macro_circ_info.cpp b/tket/src/Circuit/macro_circ_info.cpp index 520d36a412..25f1b9a37a 100644 --- a/tket/src/Circuit/macro_circ_info.cpp +++ b/tket/src/Circuit/macro_circ_info.cpp @@ -185,9 +185,11 @@ Circuit Circuit::subcircuit(const Subcircuit& sc) const { // used to construct a routing grid QPathDetailed Circuit::unit_path(const UnitID& unit) const { Vertex current_v = get_in(unit); + QPathDetailed path = {{current_v, 0}}; Edge betweenEdge = get_nth_out_edge(current_v, 0); current_v = target(betweenEdge); + while (detect_final_Op(current_v) == false) { if (n_out_edges(current_v) == 0) { throw CircuitInvalidity("A path ends before reaching an output vertex."); @@ -515,6 +517,43 @@ CutFrontier Circuit::next_cut( get_next_b_frontier(*this, b_frontier, u_frontier, next_slice_lookup)}; } +CutFrontier Circuit::next_q_cut( + std::shared_ptr u_frontier) const { + auto next_slice = std::make_shared(); + VertexSet next_slice_lookup; + VertexSet bad_vertices; + EdgeSet edge_lookup; + for (const std::pair& pair : u_frontier->get()) { + edge_lookup.insert(pair.second); + } + + // find the next slice first + for (const std::pair& pair : u_frontier->get()) { + Vertex try_v = target(pair.second); + if (detect_final_Op(try_v)) continue; + if (next_slice_lookup.contains(try_v)) + continue; // already going to be in next slice + bool good_vertex = !bad_vertices.contains(try_v); + if (!good_vertex) continue; + EdgeVec ins = get_in_edges(try_v); + for (const Edge& in : ins) { + if (!edge_lookup.contains(in) && get_edgetype(in) == EdgeType::Quantum) { + good_vertex = false; + 
bad_vertices.insert(try_v); + break; + } + } + if (good_vertex) { + next_slice_lookup.insert(try_v); + next_slice->push_back(try_v); + } + } + + return { + next_slice, get_next_u_frontier(*this, u_frontier, next_slice_lookup), + std::make_shared()}; +} + SliceVec Circuit::get_reverse_slices() const { vertex_map_t mapping; vertex_map_t rev_mapping; diff --git a/tket/src/Circuit/macro_manipulation.cpp b/tket/src/Circuit/macro_manipulation.cpp index 3f6c587cbd..c587b38fa2 100644 --- a/tket/src/Circuit/macro_manipulation.cpp +++ b/tket/src/Circuit/macro_manipulation.cpp @@ -629,27 +629,34 @@ Circuit Circuit::conditional_circuit( return cond_circ; } +bool Circuit::substitute_box_vertex( + Vertex& vert, VertexDeletion vertex_deletion) { + Op_ptr op = get_Op_ptr_from_Vertex(vert); + bool conditional = op->get_type() == OpType::Conditional; + if (conditional) { + const Conditional& cond = static_cast(*op); + op = cond.get_op(); + } + if (!op->get_desc().is_box()) return false; + const Box& b = static_cast(*op); + Circuit replacement = *b.to_circuit(); + if (conditional) { + substitute_conditional( + replacement, vert, vertex_deletion, OpGroupTransfer::Merge); + } else { + substitute(replacement, vert, vertex_deletion, OpGroupTransfer::Merge); + } + return true; +} + bool Circuit::decompose_boxes() { bool success = false; VertexList bin; BGL_FORALL_VERTICES(v, dag, DAG) { - Op_ptr op = get_Op_ptr_from_Vertex(v); - bool conditional = op->get_type() == OpType::Conditional; - if (conditional) { - const Conditional& cond = static_cast(*op); - op = cond.get_op(); - } - if (!op->get_desc().is_box()) continue; - const Box& b = static_cast(*op); - Circuit replacement = *b.to_circuit(); - if (conditional) { - substitute_conditional( - replacement, v, VertexDeletion::No, OpGroupTransfer::Merge); - } else { - substitute(replacement, v, VertexDeletion::No, OpGroupTransfer::Merge); + if (substitute_box_vertex(v, VertexDeletion::No)) { + bin.push_back(v); + success = true; } - bin.push_back(v); - success = true; } remove_vertices(bin, GraphRewiring::No, VertexDeletion::Yes); return success; diff --git a/tket/src/Circuit/setters_and_getters.cpp b/tket/src/Circuit/setters_and_getters.cpp index bc075860bb..395e6a8cec 100644 --- a/tket/src/Circuit/setters_and_getters.cpp +++ b/tket/src/Circuit/setters_and_getters.cpp @@ -472,6 +472,21 @@ std::vector Circuit::get_b_out_bundles(const Vertex &vert) const { return bundles; } +std::vector Circuit::get_b_in_bundles(const Vertex &vert) const { + unsigned n = n_ports(vert); + std::vector bundles(n); + BGL_FORALL_INEDGES(vert, e, dag, DAG) { + if (get_edgetype(e) == EdgeType::Boolean) { + port_t port = get_target_port(e); + if (port > n) { + throw CircuitInvalidity("Vertex has an output on an unexpected port"); + } + bundles.at(port).push_back(e); + } + } + return bundles; +} + // n represents the port of the edge at vert_from // there are no checks to ensure the vertex exists in the graph // will only return Quantum or Classical edges @@ -558,6 +573,17 @@ const std::optional &Circuit::get_opgroup_from_Vertex( return this->dag[vert].opgroup; } +const std::unordered_set Circuit::get_opgroups() const { + std::unordered_set opgroups; + BGL_FORALL_VERTICES(v, dag, DAG) { + std::optional v_opgroup = get_opgroup_from_Vertex(v); + if (v_opgroup) { + opgroups.insert(v_opgroup.value()); + } + } + return opgroups; +} + void Circuit::set_vertex_Op_ptr(const Vertex &vert, const Op_ptr &op) { this->dag[vert].op = op; } diff --git a/tket/src/Gate/GateUnitarySparseMatrix.cpp 
b/tket/src/Gate/GateUnitarySparseMatrix.cpp index 00a4c1a735..233ad4bf0c 100644 --- a/tket/src/Gate/GateUnitarySparseMatrix.cpp +++ b/tket/src/Gate/GateUnitarySparseMatrix.cpp @@ -153,12 +153,13 @@ std::vector GateUnitarySparseMatrix::get_unitary_triplets( return convert_1qb_type_to_controlled_type_and_get_triplets( gate, primitive_type, abs_epsilon); } catch (const GateUnitaryMatrixError& e) { - std::stringstream ss; - OpDesc desc(primitive_type); - ss << "Converting " << gate.get_name() - << " to sparse unitary, via adding controls to gate type " - << desc.name() << ": " << e.what(); - throw GateUnitaryMatrixError(ss.str(), e.cause); + // GCOVR_EXCL_START + TKET_ASSERT( + AssertMessage() + << "Converting " << gate.get_name() + << " to sparse unitary, via adding controls to gate type " + << OpDesc(primitive_type).name() << ": " << e.what()); + // GCOVR_EXCL_STOP } } return get_triplets_for_noncontrolled_gate(gate); diff --git a/tket/src/Gate/Rotation.cpp b/tket/src/Gate/Rotation.cpp index 7f07caebcc..c4c583766a 100644 --- a/tket/src/Gate/Rotation.cpp +++ b/tket/src/Gate/Rotation.cpp @@ -442,6 +442,9 @@ std::vector tk1_angles_from_unitary(const Eigen::Matrix2cd &U) { } else { // general case // s0^2 + z0^2 - x0^2 - y0^2 = cos(pi b) double t = s0 * s0 + z0 * z0 - x0 * x0 - y0 * y0; + // Rounding errors may mean t is outside the domain of acos. Fix this. + if (t > +1.) t = +1.; + if (t < -1.) t = -1.; b = std::acos(t) / PI; // w.l.o.g. b is in the range (-1,+1). diff --git a/tket/src/Graphs/AdjacencyData.cpp b/tket/src/Graphs/AdjacencyData.cpp index 67237dcb65..8ed27845b2 100644 --- a/tket/src/Graphs/AdjacencyData.cpp +++ b/tket/src/Graphs/AdjacencyData.cpp @@ -19,6 +19,8 @@ #include #include +#include "Utils/Assert.hpp" + using std::exception; using std::map; using std::runtime_error; @@ -63,13 +65,14 @@ string AdjacencyData::to_string() const { const set& AdjacencyData::get_neighbours( std::size_t vertex) const { - if (vertex >= m_cleaned_data.size()) { - stringstream ss; - ss << "AdjacencyData: get_neighbours called with invalid vertex " << vertex - << "; there are only " << m_cleaned_data.size() << " vertices"; - - throw runtime_error(ss.str()); - } + // GCOVR_EXCL_START + TKET_ASSERT( + vertex < m_cleaned_data.size() || + AssertMessage() + << "AdjacencyData: get_neighbours called with invalid vertex " + << vertex << "; there are only " << m_cleaned_data.size() + << " vertices"); + // GCOVR_EXCL_STOP return m_cleaned_data[vertex]; } @@ -90,28 +93,23 @@ std::size_t AdjacencyData::get_number_of_edges() const { } bool AdjacencyData::add_edge(std::size_t i, std::size_t j) { - try { - const bool exists = edge_exists(i, j); - if (exists) { - return false; - } - m_cleaned_data[i].insert(j); - m_cleaned_data[j].insert(i); - return true; - } catch (const exception& e) { - stringstream ss; - ss << "add_edge : " << e.what(); - throw runtime_error(ss.str()); + const bool exists = edge_exists(i, j); + if (exists) { + return false; } + m_cleaned_data[i].insert(j); + m_cleaned_data[j].insert(i); + return true; } bool AdjacencyData::edge_exists(std::size_t i, std::size_t j) const { - if (i >= m_cleaned_data.size() || j >= m_cleaned_data.size()) { - stringstream ss; - ss << "AdjacencyData: edge_exists called with vertices " << i << ", " << j - << ", but there are only " << m_cleaned_data.size() << " vertices"; - throw runtime_error(ss.str()); - } + // GCOVR_EXCL_START + TKET_ASSERT( + (i < m_cleaned_data.size() && j < m_cleaned_data.size()) || + AssertMessage() << "edge_exists called with vertices " << i 
<< ", " << j + << ", but there are only " << m_cleaned_data.size() + << " vertices"); + // GCOVR_EXCL_STOP return m_cleaned_data[i].count(j) != 0; } @@ -136,16 +134,10 @@ AdjacencyData::AdjacencyData( } } m_cleaned_data.resize(number_of_vertices); - try { - for (const auto& entry : raw_data) { - for (std::size_t neighbour : entry.second) { - add_edge(entry.first, neighbour); - } + for (const auto& entry : raw_data) { + for (std::size_t neighbour : entry.second) { + add_edge(entry.first, neighbour); } - } catch (const exception& e) { - stringstream ss; - ss << "AdjacencyData: constructing from map:" << e.what(); - throw runtime_error(ss.str()); } } @@ -153,28 +145,21 @@ AdjacencyData::AdjacencyData( const vector>& raw_data, bool allow_loops) { m_cleaned_data.resize(raw_data.size()); - try { - for (std::size_t i = 0; i < raw_data.size(); ++i) { - for (std::size_t j : raw_data[i]) { - if (i == j && !allow_loops) { - stringstream ss; - ss << "vertex " << i << " has a loop."; - throw runtime_error(ss.str()); - } - if (j > raw_data.size()) { - stringstream ss; - ss << "vertex " << i << " has illegal neighbour vertex " << j; - throw runtime_error(ss.str()); - } - m_cleaned_data[i].insert(j); - m_cleaned_data[j].insert(i); - } + for (std::size_t i = 0; i < m_cleaned_data.size(); ++i) { + for (std::size_t j : raw_data[i]) { + // GCOVR_EXCL_START + TKET_ASSERT( + i != j || allow_loops || + AssertMessage() << "Vertex " << i << " out of " + << m_cleaned_data.size() << " has a loop."); + TKET_ASSERT( + j < m_cleaned_data.size() || + AssertMessage() << "Vertex " << i << " has illegal neighbour vertex " + << j << ", the size is " << m_cleaned_data.size()); + // GCOVR_EXCL_STOP + m_cleaned_data[i].insert(j); + m_cleaned_data[j].insert(i); } - } catch (const exception& e) { - stringstream ss; - ss << "AdjacencyData: we have " << raw_data.size() - << " vertices: " << e.what(); - throw runtime_error(ss.str()); } } diff --git a/tket/src/Graphs/BruteForceColouring.cpp b/tket/src/Graphs/BruteForceColouring.cpp index 546359498f..59c9a49bbf 100644 --- a/tket/src/Graphs/BruteForceColouring.cpp +++ b/tket/src/Graphs/BruteForceColouring.cpp @@ -215,13 +215,15 @@ BruteForceColouring::BruteForceColouring( } throw std::runtime_error("suggested_number_of_colours hit number_of_nodes"); } catch (const std::exception& e) { - std::stringstream ss; - ss << "BruteForceColouring: initial_suggested_number_of_colours = " - << initial_suggested_number_of_colours - << ", reached suggested_number_of_colours = " - << suggested_number_of_colours << ", had " << number_of_nodes - << " nodes. Error: " << e.what() << priority.print_raw_data(); - throw std::runtime_error(ss.str()); + // GCOVR_EXCL_START + TKET_ASSERT( + AssertMessage() << "initial_suggested_number_of_colours = " + << initial_suggested_number_of_colours + << ", reached suggested_number_of_colours = " + << suggested_number_of_colours << ", had " + << number_of_nodes << " nodes. 
Error: " << e.what() + << priority.print_raw_data()); + // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/ColouringPriority.cpp b/tket/src/Graphs/ColouringPriority.cpp index 2dcb98cc9d..5dee5e62b0 100644 --- a/tket/src/Graphs/ColouringPriority.cpp +++ b/tket/src/Graphs/ColouringPriority.cpp @@ -18,6 +18,7 @@ #include #include "AdjacencyData.hpp" +#include "Utils/Assert.hpp" using std::map; using std::set; @@ -37,12 +38,14 @@ static void fill_initial_node_sequence( try { for (size_t clique_vertex : initial_clique) { + // GCOVR_EXCL_START if (vertices_in_component.count(clique_vertex) == 0) { std::stringstream ss; ss << "initial clique vertex " << clique_vertex << " is not in this component"; throw std::runtime_error(ss.str()); } + // GCOVR_EXCL_STOP nodes.emplace_back(); nodes.back().vertex = clique_vertex; } @@ -76,21 +79,25 @@ static void fill_initial_node_sequence( vertices_to_add.clear(); current_nodes_begin = current_nodes_end; } + // GCOVR_EXCL_START if (nodes.size() != vertices_in_component.size()) { throw std::runtime_error( "Final size check: number of filled " "nodes does not match number of vertices in this component"); } + // GCOVR_EXCL_STOP } catch (const std::exception& e) { - std::stringstream ss; - ss << "ColouringPriority: fill_initial_node_sequence: initial" - << " clique size " << initial_clique.size() << ", " - << vertices_in_component.size() << " vertices in" - << " this component (full graph has " - << adjacency_data.get_number_of_vertices() << " vertices)." - << " So far, filled " << nodes.size() << " nodes." - << " Error: " << e.what(); - throw std::runtime_error(ss.str()); + // GCOVR_EXCL_START + TKET_ASSERT( + AssertMessage() + << "ColouringPriority: fill_initial_node_sequence: initial" + << " clique size " << initial_clique.size() << ", " + << vertices_in_component.size() << " vertices in" + << " this component (full graph has " + << adjacency_data.get_number_of_vertices() << " vertices)." + << " So far, filled " << nodes.size() << " nodes." 
+ << " Error: " << e.what()); + // GCOVR_EXCL_STOP } } @@ -115,6 +122,8 @@ const ColouringPriority::Nodes& ColouringPriority::get_nodes() const { return m_nodes; } +// GCOVR_EXCL_START +// currently used only within a tket assert macro string ColouringPriority::print_raw_data(bool relabel_to_simplify) const { map old_vertex_to_new_vertex; if (relabel_to_simplify) { @@ -168,6 +177,7 @@ string ColouringPriority::print_raw_data(bool relabel_to_simplify) const { ss << "\n};\n\n"; return ss.str(); } +// GCOVR_EXCL_STOP ColouringPriority::ColouringPriority( const AdjacencyData& adjacency_data, diff --git a/tket/src/Graphs/GraphColouring.cpp b/tket/src/Graphs/GraphColouring.cpp index 2e88ec99bd..11f3da98a4 100644 --- a/tket/src/Graphs/GraphColouring.cpp +++ b/tket/src/Graphs/GraphColouring.cpp @@ -24,6 +24,7 @@ #include "ColouringPriority.hpp" #include "GraphRoutines.hpp" #include "LargeCliquesResult.hpp" +#include "Utils/Assert.hpp" using std::exception; using std::map; @@ -80,6 +81,7 @@ static void colour_single_component( const auto& colour = entry.second; result.number_of_colours = std::max(result.number_of_colours, colour + 1); + // GCOVR_EXCL_START try { if (vertex >= result.colours.size()) { throw runtime_error("illegal vertex index"); @@ -92,12 +94,12 @@ static void colour_single_component( } colour_to_assign = colour; } catch (const exception& e) { - stringstream ss; - ss << "colouring single component " << component_index - << " returned vertex " << vertex << " with colour " << colour << " : " - << e.what(); - throw runtime_error(ss.str()); + TKET_ASSERT( + AssertMessage() << "colouring single component " << component_index + << " returned vertex " << vertex << " with colour " + << colour << " : " << e.what()); } + // GCOVR_EXCL_STOP } } @@ -109,11 +111,13 @@ static void check_final_colouring(GraphColouringResult& result) { result.number_of_colours = 0; for (std::size_t i = 0; i < result.colours.size(); ++i) { const auto colour = result.colours[i]; + // GCOVR_EXCL_START if (colour >= result.colours.size()) { stringstream ss; ss << "vertex " << i << " has unassigned or illegal colour " << colour; throw runtime_error(ss.str()); } + // GCOVR_EXCL_STOP result.number_of_colours = std::max(result.number_of_colours, colour + 1); } } @@ -130,12 +134,14 @@ GraphColouringResult GraphColouringRoutines::get_colouring( const LargeCliquesResult cliques_in_this_component( adjacency_data, connected_components[i]); + // GCOVR_EXCL_START if (cliques_in_this_component.cliques.empty()) { stringstream ss; ss << "component " << i << " has " << connected_components[i].size() << " vertices, but couldn't find a clique!"; throw runtime_error(ss.str()); } + // GCOVR_EXCL_STOP cliques[i] = cliques_in_this_component.cliques[0]; component_indices[i] = i; } @@ -163,12 +169,16 @@ GraphColouringResult GraphColouringRoutines::get_colouring( check_final_colouring(result); return result; } catch (const exception& e) { - stringstream ss; - ss << "GraphColouringRoutines::get_colouring: we had " - << connected_components.size() << " connected components, " - << adjacency_data.get_number_of_vertices() - << " vertices in total: " << e.what(); - throw runtime_error(ss.str()); + // GCOVR_EXCL_START + TKET_ASSERT( + AssertMessage() << "We had " << connected_components.size() + << " connected components, " + << adjacency_data.get_number_of_vertices() + << " vertices in total: " << e.what()); + // Some compilers error with "non-void function does not + // return a value in all control paths..." 
+ return GraphColouringResult(); + // GCOVR_EXCL_STOP } } diff --git a/tket/src/Graphs/include/Graphs/AbstractGraph.hpp b/tket/src/Graphs/include/Graphs/AbstractGraph.hpp index 4b27796872..b5f6373073 100644 --- a/tket/src/Graphs/include/Graphs/AbstractGraph.hpp +++ b/tket/src/Graphs/include/Graphs/AbstractGraph.hpp @@ -53,6 +53,11 @@ class AbstractGraph { /** Check if an edge exists between two nodes */ virtual bool edge_exists(const T &node1, const T &node2) const = 0; + /** Check if an edge exists between two nodes */ + bool bidirectional_edge_exists(const T &node1, const T &node2) const { + return (edge_exists(node1, node2) || edge_exists(node2, node1)); + } + /** Check if a node exists */ bool node_exists(const T &node) const { return nodes_.contains(node); } diff --git a/tket/src/Mapping/AASLabelling.cpp b/tket/src/Mapping/AASLabelling.cpp new file mode 100644 index 0000000000..152530f7f6 --- /dev/null +++ b/tket/src/Mapping/AASLabelling.cpp @@ -0,0 +1,117 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Mapping/AASLabelling.hpp" + +namespace tket { + +std::pair AASLabellingMethod::routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + bool found_unplaced_qubit = false; + bool found_unplaced_ppb = false; + + // search for unplaced qubitto speed up the runtime + for (Qubit q : mapping_frontier->circuit_.all_qubits()) { + if (!architecture->node_exists(Node(q))) { + found_unplaced_qubit = true; + break; + } + } + if (found_unplaced_qubit) { + std::shared_ptr next_frontier = + frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->linear_boundary); + + CutFrontier next_cut = mapping_frontier->circuit_.next_cut( + next_frontier, std::make_shared()); + + for (const Vertex& v : *next_cut.slice) { + if (mapping_frontier->circuit_.get_OpType_from_Vertex(v) == + OpType::PhasePolyBox) { + TKET_ASSERT(mapping_frontier->circuit_.is_quantum_node(v)); + Op_ptr op_ptr_ppb = + mapping_frontier->circuit_.get_Op_ptr_from_Vertex(v); + + for (const Edge& e : mapping_frontier->circuit_.get_in_edges_of_type( + v, EdgeType::Quantum)) { + for (const std::pair& pair : + next_frontier->get()) { + if (pair.second == e) { + if (!architecture->node_exists(Node(pair.first))) { + found_unplaced_ppb = true; + } + } + } + } + } + } + } + + if (!found_unplaced_ppb) { + return {false, {}}; + } else { + qubit_vector_t q_vec = mapping_frontier->circuit_.all_qubits(); + unit_map_t qubit_to_nodes_place; + node_set_t node_set_placed; + + for (Qubit q : q_vec) { + if (architecture->node_exists(Node(q))) { + qubit_to_nodes_place.insert({q, Node(q)}); + node_set_placed.insert(Node(q)); + } + } + + node_vector_t nodes_vec = architecture->get_all_nodes_vec(); + + // place all unplaced qubits + + for (Qubit q : q_vec) { + if (!architecture->node_exists(Node(q))) { + // found unplaced qubit + // other checks could be added here to avoid placing unused qubits or + // qubits that are 
not in an ppb + + unsigned index_to_use = 0; + while (node_set_placed.find(nodes_vec[index_to_use]) != + node_set_placed.end()) { + ++index_to_use; + } + qubit_to_nodes_place.insert({q, nodes_vec[index_to_use]}); + node_set_placed.insert(nodes_vec[index_to_use]); + mapping_frontier->update_bimaps( + mapping_frontier->get_qubit_from_circuit_uid(q), + nodes_vec[index_to_use]); + } + } + + mapping_frontier->update_linear_boundary_uids(qubit_to_nodes_place); + mapping_frontier->circuit_.rename_units(qubit_to_nodes_place); + + return {true, {}}; + } +} + +nlohmann::json AASLabellingMethod::serialize() const { + nlohmann::json j; + j["name"] = "AASLabellingMethod"; + return j; +} + +AASLabellingMethod AASLabellingMethod::deserialize( + const nlohmann::json& /*j*/) { + return AASLabellingMethod(); +} + +} // namespace tket diff --git a/tket/src/Mapping/AASRoute.cpp b/tket/src/Mapping/AASRoute.cpp new file mode 100644 index 0000000000..194d7fc58e --- /dev/null +++ b/tket/src/Mapping/AASRoute.cpp @@ -0,0 +1,151 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Mapping/AASRoute.hpp" + +namespace tket { + +AASRouteRoutingMethod::AASRouteRoutingMethod( + unsigned _aaslookahead, aas::CNotSynthType _cnotsynthtype) + : cnotsynthtype_(_cnotsynthtype), aaslookahead_(_aaslookahead) {} + +std::pair AASRouteRoutingMethod::routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + std::shared_ptr next_frontier = + frontier_convert_vertport_to_edge( + mapping_frontier->circuit_, mapping_frontier->linear_boundary); + + CutFrontier next_cut = mapping_frontier->circuit_.next_cut( + next_frontier, std::make_shared()); + + // search for ppb in slice + for (const Vertex& v : *next_cut.slice) { + if (mapping_frontier->circuit_.get_OpType_from_Vertex(v) == + OpType::PhasePolyBox) { + TKET_ASSERT(mapping_frontier->circuit_.is_quantum_node(v)); + + unsigned number_of_qubits = mapping_frontier->circuit_.n_in_edges(v); + unit_vector_t qubit_vec(number_of_qubits); + + // check all qubits of the ppb if they are placed + bool box_placed = true; + for (unsigned i = 0; i < number_of_qubits; ++i) { + const Edge& e = mapping_frontier->circuit_.get_nth_in_edge(v, i); + for (const std::pair& pair : + next_frontier->get()) { + if (pair.second == e) { + qubit_vec[i] = Qubit(pair.first); + if (!architecture->node_exists(Node(pair.first))) { + box_placed = false; + } + } + } + } + + // check that the box we are working on is really placed and the check + // method has been executed + // this is imporant if the circuit contains more than one ppb and only one + // of them is placed + + if (box_placed) { + // get ppb from op + Op_ptr op_ptr_ppb = + mapping_frontier->circuit_.get_Op_ptr_from_Vertex(v); + const PhasePolyBox& ppb = static_cast(*op_ptr_ppb); + + // Circuit circuit_ppb_place(*ppb.to_circuit()); + + // generate aritecture to make sure that the result can be inserted into + // the given circuit by using 
flatten_registers + auto nodes_vec = architecture->get_all_nodes_vec(); + auto edges_vec = architecture->get_all_edges_vec(); + + // create maps from qubits/node to int + std::map orig_node_to_int_node; + std::map orig_qubit_to_int_node; + + unsigned id_node = 0; + + unsigned n_nodes = architecture->n_nodes(); + + for (Node orig_node : nodes_vec) { + orig_node_to_int_node.insert({orig_node, Node(n_nodes)}); + } + + for (auto orig_qubit : qubit_vec) { + orig_node_to_int_node[orig_qubit] = Node(id_node); + ++id_node; + } + + for (Node orig_node : nodes_vec) { + if (orig_node_to_int_node[orig_node] == Node(n_nodes)) { + orig_node_to_int_node[orig_node] = Node(id_node); + ++id_node; + } + } + + // define new arcitecture with int nodes for ppb + std::vector new_con; + for (auto pair : architecture->get_all_edges_vec()) { + new_con.push_back( + {orig_node_to_int_node[UnitID(pair.first)], + orig_node_to_int_node[UnitID(pair.second)]}); + } + + Architecture new_int_arch = Architecture(new_con); + + TKET_ASSERT(architecture->n_nodes() == new_int_arch.n_nodes()); + + Circuit result = aas::phase_poly_synthesis( + new_int_arch, ppb, aaslookahead_, cnotsynthtype_); + + // make sure the circuit can be inserted + result.flatten_registers(); + + // substitute the ppb vertex in the initial circuit with the routed + // result + mapping_frontier->circuit_.substitute(result, v); + return {true, {}}; + } + } + } + return {false, {}}; +} + +aas::CNotSynthType AASRouteRoutingMethod::get_cnotsynthtype() const { + return this->cnotsynthtype_; +} + +unsigned AASRouteRoutingMethod::get_aaslookahead() const { + return this->aaslookahead_; +} + +nlohmann::json AASRouteRoutingMethod::serialize() const { + nlohmann::json j; + j["aaslookahead"] = this->get_aaslookahead(); + j["cnotsynthtype"] = (unsigned)this->get_cnotsynthtype(); + j["name"] = "AASRouteRoutingMethod"; + return j; +} + +AASRouteRoutingMethod AASRouteRoutingMethod::deserialize( + const nlohmann::json& j) { + unsigned aaslookahead = j.at("aaslookahead").get(); + aas::CNotSynthType cnotsynthtype = + (aas::CNotSynthType)j.at("cnotsynthtype").get(); + return AASRouteRoutingMethod(aaslookahead, cnotsynthtype); +} + +} // namespace tket diff --git a/tket/src/Mapping/BoxDecomposition.cpp b/tket/src/Mapping/BoxDecomposition.cpp new file mode 100644 index 0000000000..084d3630e9 --- /dev/null +++ b/tket/src/Mapping/BoxDecomposition.cpp @@ -0,0 +1,77 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
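The relabelling step above (orig_node_to_int_node) is the heart of AASRouteRoutingMethod: the PhasePolyBox's qubits are given the lowest integer node labels so that, after flatten_registers() on the synthesised circuit, they land back on the box's ports, and the remaining architecture nodes take the leftover labels. A distilled, standalone sketch of that idea follows; it is illustrative only, not the exact code in this diff, and the helper name is invented.

// Illustrative sketch, not part of the PR: the contiguous-integer relabelling
// idea used by AASRouteRoutingMethod. The helper name is invented.
#include <map>
#include <vector>

#include "Utils/UnitID.hpp"

using namespace tket;

static std::map<UnitID, Node> relabel_to_contiguous_ints(
    const std::vector<Node>& all_nodes,
    const std::vector<UnitID>& box_qubits) {
  std::map<UnitID, Node> relabelling;
  unsigned next = 0;
  // Box qubits first: they become Node(0), Node(1), ..., so that
  // flatten_registers() on the synthesised circuit matches the box ports.
  for (const UnitID& q : box_qubits) {
    relabelling.insert({q, Node(next)});
    ++next;
  }
  // Every remaining architecture node takes the next free integer label.
  for (const Node& n : all_nodes) {
    if (relabelling.count(n) == 0) {
      relabelling.insert({n, Node(next)});
      ++next;
    }
  }
  return relabelling;
}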
+#include "Mapping/BoxDecomposition.hpp" + +#include "Mapping/MappingFrontier.hpp" + +namespace tket { + +BoxDecomposition::BoxDecomposition( + const ArchitecturePtr &_architecture, + MappingFrontier_ptr &_mapping_frontier) + : architecture_(_architecture), mapping_frontier_(_mapping_frontier) {} + +bool BoxDecomposition::solve() { + // Box type vertices are later removed from DAG + VertexList bin; + bool modified = false; + std::shared_ptr frontier_edges = + frontier_convert_vertport_to_edge( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->linear_boundary); + CutFrontier next_cut = + this->mapping_frontier_->circuit_.next_q_cut(frontier_edges); + for (Vertex &vert : *next_cut.slice) { + Op_ptr op = this->mapping_frontier_->circuit_.get_Op_ptr_from_Vertex(vert); + if (op->get_desc().is_box() || + (op->get_type() == OpType::Conditional && + static_cast(*op).get_op()->get_desc().is_box())) { + if (this->mapping_frontier_->circuit_.substitute_box_vertex( + vert, Circuit::VertexDeletion::No)) { + modified = true; + bin.push_back(vert); + } + } + } + if (!modified) { + return false; + } + // Delete vertices + this->mapping_frontier_->circuit_.remove_vertices( + bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + return true; +} + +BoxDecompositionRoutingMethod::BoxDecompositionRoutingMethod(){}; + +std::pair BoxDecompositionRoutingMethod::routing_method( + MappingFrontier_ptr &mapping_frontier, + const ArchitecturePtr &architecture) const { + BoxDecomposition bd(architecture, mapping_frontier); + bool modified = bd.solve(); + return {modified, {}}; +} + +nlohmann::json BoxDecompositionRoutingMethod::serialize() const { + nlohmann::json j; + j["name"] = "BoxDecompositionRoutingMethod"; + return j; +} + +BoxDecompositionRoutingMethod BoxDecompositionRoutingMethod::deserialize( + const nlohmann::json & /*j*/) { + return BoxDecompositionRoutingMethod(); +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/CMakeLists.txt b/tket/src/Mapping/CMakeLists.txt new file mode 100644 index 0000000000..889d06135f --- /dev/null +++ b/tket/src/Mapping/CMakeLists.txt @@ -0,0 +1,70 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project(tket-${COMP}) + +if (NOT ${COMP} STREQUAL "Mapping") + message(FATAL_ERROR "Unexpected component name.") +endif() + +add_library(tket-${COMP} + AASRoute.cpp + AASLabelling.cpp + LexicographicalComparison.cpp + LexiRoute.cpp + LexiRouteRoutingMethod.cpp + LexiLabelling.cpp + MappingFrontier.cpp + MappingManager.cpp + MultiGateReorder.cpp + BoxDecomposition.cpp + RoutingMethodCircuit.cpp + RoutingMethodJson.cpp + Verification.cpp) + +list(APPEND DEPS_${COMP} + ArchAwareSynth + Architecture + Characterisation + Circuit + Clifford + Converters + Gate + Graphs + Ops + OpType + PauliGraph + Placement + TokenSwapping + Utils) + +foreach(DEP ${DEPS_${COMP}}) + target_include_directories( + tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) + target_link_libraries( + tket-${COMP} PRIVATE tket-${DEP}) +endforeach() + +target_include_directories(tket-${COMP} + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${TKET_${COMP}_INCLUDE_DIR} + ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) + +target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS_SYMENGINE}) + +if (WIN32) + # For boost::uuid: + target_link_libraries(tket-${COMP} PRIVATE bcrypt) +endif() \ No newline at end of file diff --git a/tket/src/Mapping/LexiLabelling.cpp b/tket/src/Mapping/LexiLabelling.cpp new file mode 100644 index 0000000000..00be47608b --- /dev/null +++ b/tket/src/Mapping/LexiLabelling.cpp @@ -0,0 +1,36 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "Mapping/LexiLabelling.hpp" + +namespace tket { + +std::pair LexiLabellingMethod::routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + LexiRoute lr(architecture, mapping_frontier); + return {lr.solve_labelling(), {}}; +} + +nlohmann::json LexiLabellingMethod::serialize() const { + nlohmann::json j; + j["name"] = "LexiLabellingMethod"; + return j; +} + +LexiLabellingMethod LexiLabellingMethod::deserialize( + const nlohmann::json& /*j*/) { + return LexiLabellingMethod(); +} + +} // namespace tket diff --git a/tket/src/Mapping/LexiRoute.cpp b/tket/src/Mapping/LexiRoute.cpp new file mode 100644 index 0000000000..1884b23ac1 --- /dev/null +++ b/tket/src/Mapping/LexiRoute.cpp @@ -0,0 +1,571 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
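A recurring pattern in the file below is picking a "best" node or swap by comparing lexicographical distance vectors with the standard `operator<` on `std::vector`, which orders element by element from the front. A small self-contained sketch of that ordering (the vectors are made up for illustration, not derived from any architecture):

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  // Entry i typically counts qubit pairs (or nodes) at distance (diameter - i),
  // so a lexicographically smaller vector has fewer far-apart entries first.
  std::vector<std::size_t> winning{0, 2, 1};
  std::vector<std::size_t> comparison{1, 0, 0};
  // The first differing entry decides the comparison: 0 < 1.
  assert(winning < comparison);
  return 0;
}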
+ +#include "Mapping/LexiRoute.hpp" + +#include "Mapping/MappingFrontier.hpp" +#include "Utils/Json.hpp" + +namespace tket { + +LexiRoute::LexiRoute( + const ArchitecturePtr& _architecture, + MappingFrontier_ptr& _mapping_frontier) + : architecture_(_architecture), mapping_frontier_(_mapping_frontier) { + // set initial logical->physical labelling + for (const Qubit& qb : this->mapping_frontier_->circuit_.all_qubits()) { + this->labelling_.insert({qb, qb}); + Node n(qb); + // store which Node have been asigned to Circuit already + if (this->architecture_->node_exists(n)) { + this->assigned_nodes_.insert(n); + } + } +} + +bool LexiRoute::assign_at_distance( + const UnitID& assignee, const Node& root, unsigned distances) { + node_set_t valid_nodes; + for (const Node& neighbour : + this->architecture_->nodes_at_distance(root, distances)) { + // A node is unassigned if it's empty or holding an ancilla + if (this->assigned_nodes_.find(neighbour) == this->assigned_nodes_.end() || + this->mapping_frontier_->ancilla_nodes_.find(neighbour) != + this->mapping_frontier_->ancilla_nodes_.end()) { + valid_nodes.insert(neighbour); + } + } + if (valid_nodes.size() == 1) { + auto it = valid_nodes.begin(); + // If the node to be assigned holds an ancilla + if (this->mapping_frontier_->ancilla_nodes_.find(*it) != + this->mapping_frontier_->ancilla_nodes_.end()) { + // => node *it is already present in circuit, but as an ancilla + // Merge the logical qubit to the end of the ancilla. + // notice that the merge_ancilla updates the qubit maps. + this->mapping_frontier_->merge_ancilla(assignee, *it); + this->mapping_frontier_->ancilla_nodes_.erase(*it); + this->labelling_.erase(*it); + this->labelling_[assignee] = *it; + } else { + this->labelling_[assignee] = *it; + this->assigned_nodes_.insert(*it); + // Assignee is a UnitID obtained from the circuit + // so we need to use the initial map to find the associated qubit. + this->mapping_frontier_->update_bimaps( + this->mapping_frontier_->get_qubit_from_circuit_uid(assignee), *it); + } + return true; + } + if (valid_nodes.size() > 1) { + auto it = valid_nodes.begin(); + lexicographical_distances_t winning_distances = + this->architecture_->get_distances(*it); + Node preserved_node = *it; + ++it; + for (; it != valid_nodes.end(); ++it) { + lexicographical_distances_t comparison_distances = + this->architecture_->get_distances(*it); + if (comparison_distances < winning_distances) { + preserved_node = *it; + winning_distances = comparison_distances; + } + } + if (this->mapping_frontier_->ancilla_nodes_.find(preserved_node) != + this->mapping_frontier_->ancilla_nodes_.end()) { + // => node *it is already present in circuit, but as an ancilla + // Merge the logical qubit to the end of the ancilla. 
+ this->mapping_frontier_->merge_ancilla(assignee, preserved_node); + this->mapping_frontier_->ancilla_nodes_.erase(preserved_node); + this->labelling_.erase(preserved_node); + this->labelling_[assignee] = preserved_node; + } else { + // add ancilla case + this->labelling_[assignee] = preserved_node; + this->assigned_nodes_.insert(preserved_node); + this->mapping_frontier_->update_bimaps( + this->mapping_frontier_->get_qubit_from_circuit_uid(assignee), + preserved_node); + } + return true; + } + return false; +} + +bool LexiRoute::update_labelling() { + // iterate through interacting qubits, assigning them to an Architecture Node + // if they aren't already + bool relabelled = false; + for (const auto& pair : this->interacting_uids_) { + bool uid_0_exist = + this->architecture_->node_exists(Node(this->labelling_[pair.first])); + bool uid_1_exist = + this->architecture_->node_exists(Node(this->labelling_[pair.second])); + if (!uid_0_exist || !uid_1_exist) { + relabelled = true; + } + if (!uid_0_exist && !uid_1_exist) { + // Place one on free unassigned qubit + // Then place second later + // condition => No ancilla qubits assigned, so don't check + if (this->assigned_nodes_.size() == 0) { + // find nodes with best averaged distance to other nodes + // place it there... + std::set max_degree_nodes = + this->architecture_->max_degree_nodes(); + auto it = max_degree_nodes.begin(); + lexicographical_distances_t winning_distances = + this->architecture_->get_distances(*it); + Node preserved_node = Node(*it); + ++it; + for (; it != max_degree_nodes.end(); ++it) { + lexicographical_distances_t comparison_distances = + this->architecture_->get_distances(*it); + if (comparison_distances < winning_distances) { + preserved_node = Node(*it); + winning_distances = comparison_distances; + } + } + this->labelling_[pair.first] = preserved_node; + this->assigned_nodes_.insert(preserved_node); + // Update bimaps + this->mapping_frontier_->update_bimaps( + this->mapping_frontier_->get_qubit_from_circuit_uid(pair.first), + preserved_node); + uid_0_exist = true; + // given best node, do something + } else { + // Assign uid_0 to an unassigned node that is + // 1. adjacent to the already assigned nodes + // 2. 
has an unassigned neighbour + auto root_it = this->assigned_nodes_.begin(); + while (!uid_0_exist && root_it != this->assigned_nodes_.end()) { + Node root = *root_it; + uid_0_exist = this->assign_at_distance(pair.first, root, 1); + ++root_it; + } + if (!uid_0_exist) { + throw LexiRouteError( + "Unable to assign physical qubit - no free qubits remaining."); + } + } + } + if (!uid_0_exist && uid_1_exist) { + Node root(this->labelling_[pair.second]); + for (unsigned k = 1; k <= this->architecture_->get_diameter(); k++) { + uid_0_exist = this->assign_at_distance(pair.first, root, k); + if (uid_0_exist) { + break; + } + } + if (!uid_0_exist) { + throw LexiRouteError( + "Unable to assign physical qubit - no free qubits remaining."); + } + } + if (uid_0_exist && !uid_1_exist) { + Node root(this->labelling_[pair.first]); + for (unsigned k = 1; k <= this->architecture_->get_diameter(); k++) { + uid_1_exist = this->assign_at_distance(pair.second, root, k); + if (uid_1_exist) { + break; + } + } + if (!uid_1_exist) { + throw LexiRouteError( + "Unable to assign physical qubit - no free qubits remaining."); + } + } + } + return relabelled; +} + +/** + * LexiRoute::set_interacting_uids + * Updates this->interacting_uids_ with all "interacting" pairs + * of UnitID in this->mapping_frontier_ + */ +bool LexiRoute::set_interacting_uids( + AssignedOnly assigned_only, CheckRoutingValidity route_check, + CheckLabellingValidity label_check) { + // return types + this->interacting_uids_.clear(); + bool all_placed = true; + for (auto it = + this->mapping_frontier_->linear_boundary->get().begin(); + it != this->mapping_frontier_->linear_boundary->get().end(); + ++it) { + Edge e0 = this->mapping_frontier_->circuit_.get_nth_out_edge( + it->second.first, it->second.second); + Vertex v0 = this->mapping_frontier_->circuit_.target(e0); + // should never be input vertex, so can always use in_edges + Op_ptr op = this->mapping_frontier_->circuit_.get_Op_ptr_from_Vertex(v0); + if (op->get_type() != OpType::Barrier) { + int n_edges = this->mapping_frontier_->circuit_.n_in_edges_of_type( + v0, EdgeType::Quantum); + // make forwards = backwards + if (n_edges == 2) { + auto jt = it; + ++jt; + while (jt != + this->mapping_frontier_->linear_boundary->get().end()) { + // i.e. 
if vertices match + Edge e1 = this->mapping_frontier_->circuit_.get_nth_out_edge( + jt->second.first, jt->second.second); + Vertex v1 = this->mapping_frontier_->circuit_.target(e1); + if (v0 == v1) { + // we can assume a qubit will only be in one interaction + // we can assume from how we iterate through pairs that each qubit + // will only be found in one match + bool node0_exists = + this->architecture_->node_exists(Node(it->first)); + bool node1_exists = + this->architecture_->node_exists(Node(jt->first)); + if (!node0_exists || !node1_exists || op->get_desc().is_box()) { + all_placed = false; + if (route_check == CheckRoutingValidity::Yes) return false; + } + + if (assigned_only == AssignedOnly::No || + (node0_exists && node1_exists)) { + interacting_uids_.insert({it->first, jt->first}); + interacting_uids_.insert({jt->first, it->first}); + } + } + ++jt; + } + } + } + } + + // conditions for proceeding with labelling + if (label_check == CheckLabellingValidity::Yes) { + if (all_placed) { + return true; + } else { + return false; + } + } + // this should have left early when first found + if (route_check == CheckRoutingValidity::Yes) { + if (all_placed) { + if (interacting_uids_.size() > 0) { + return true; + } + return false; + } else { + return false; + } + } + // => either route_check true and all_placed so valid + // or !route_check and !label_check so return true and discard + return true; +} + +swap_set_t LexiRoute::get_candidate_swaps() { + swap_set_t candidate_swaps; + for (const auto& interaction : this->interacting_uids_) { + Node assigned_first = Node(this->labelling_[interaction.first]); + std::vector adjacent_uids_0 = + this->architecture_->nodes_at_distance(assigned_first, 1); + TKET_ASSERT(adjacent_uids_0.size() != 0); + for (const Node& neighbour : adjacent_uids_0) { + if (candidate_swaps.find({neighbour, assigned_first}) == + candidate_swaps.end()) { + candidate_swaps.insert({assigned_first, neighbour}); + } + } + Node assigned_second = Node(this->labelling_[interaction.second]); + std::vector adjacent_uids_1 = + this->architecture_->nodes_at_distance(assigned_second, 1); + TKET_ASSERT(adjacent_uids_1.size() != 0); + for (const Node& neighbour : adjacent_uids_1) { + if (candidate_swaps.find({neighbour, assigned_second}) == + candidate_swaps.end()) { + candidate_swaps.insert({assigned_second, neighbour}); + } + } + } + return candidate_swaps; +} + +bool is_vertex_CX(const Circuit& circ_, const Vertex& v) { + OpType ot = circ_.get_OpType_from_Vertex(v); + if (ot != OpType::CX) { + if (ot == OpType::Conditional) { + const Conditional& b = + static_cast(*circ_.get_Op_ptr_from_Vertex(v)); + if (b.get_op()->get_type() != OpType::CX) { + return false; + } + } else { + return false; + } + } + return true; +} + +std::pair LexiRoute::check_bridge( + const std::pair& swap, unsigned lookahead) { + std::pair output = {false, false}; + // first confirm whether it even has an interaction + auto it = this->interacting_uids_.find(swap.first); + if (it != this->interacting_uids_.end()) { // => in interaction + if (this->architecture_->get_distance(swap.first, Node(it->second)) == + 2) { // => could be bridge + // below should always return correct object given prior checks + VertPort vp = + (*this->mapping_frontier_->linear_boundary->find(swap.first)).second; + Edge out_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + vp.first, vp.second); + output.first = is_vertex_CX( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->circuit_.target(out_edge)); + } + } + // 
repeat for second swap + it = this->interacting_uids_.find(swap.second); + if (it != this->interacting_uids_.end()) { + if (this->architecture_->get_distance(swap.second, Node(it->second)) == 2) { + VertPort vp = + (*this->mapping_frontier_->linear_boundary->find(swap.second)).second; + Edge out_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + vp.first, vp.second); + output.second = is_vertex_CX( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->circuit_.target(out_edge)); + } + } + if ((output.first && output.second) || (!output.first && !output.second)) { + return {0, 0}; + } + // implies conditions are set to at least check if BRIDGE is better + swap_set_t candidate_swaps = { + swap, + {swap.first, + swap.first}}; // second swap here will just compare the base case + + // as with best swap finder, we create a set of candidate swap gates and + // then find best, except with only 2 swap (best swap and no swap) + while (candidate_swaps.size() > 1 /*some lookahead parameter*/) { + this->mapping_frontier_->advance_next_2qb_slice(lookahead); + // true bool means it only sets interacting uids if both uids are in + // architecture + this->set_interacting_uids( + AssignedOnly::Yes, CheckRoutingValidity::No, + CheckLabellingValidity::No); + // if 0, just take first swap rather than place + if (this->interacting_uids_.size() == 0) { + candidate_swaps = {*candidate_swaps.begin()}; + } else { + interacting_nodes_t convert_uids; + for (const auto& p : this->interacting_uids_) { + convert_uids.insert( + {Node(this->labelling_[p.first]), + Node(this->labelling_[p.second])}); + } + LexicographicalComparison lookahead_lc(this->architecture_, convert_uids); + lookahead_lc.remove_swaps_lexicographical(candidate_swaps); + } + } + // condition implies bridge is chosen + // if both remained then lexicographically equivalent under given conditions + // so either can be added with same consequences (for given hyper + // parameters) + if (*candidate_swaps.begin() == swap) { + output = {0, 0}; + } + return output; +} + +// Returns the distance between n1 and p1 and the distance between n2 and p2, +// distance ordered (greatest first) +const std::pair LexiRoute::pair_distances( + const Node& p0_first, const Node& p0_second, const Node& p1_first, + const Node& p1_second) const { + { + const bool valid = this->architecture_->node_exists(p0_first) && + this->architecture_->node_exists(p0_second) && + this->architecture_->node_exists(p1_first) && + this->architecture_->node_exists(p1_second); + TKET_ASSERT(valid); + } + size_t curr_dist1 = this->architecture_->get_distance(p0_first, p0_second); + size_t curr_dist2 = this->architecture_->get_distance(p1_first, p1_second); + return (curr_dist1 > curr_dist2) ? 
std::make_pair(curr_dist1, curr_dist2) + : std::make_pair(curr_dist2, curr_dist1); +} + +void LexiRoute::remove_swaps_decreasing(swap_set_t& swaps) { + swap_set_t remaining_swaps; + Node pair_first, pair_second; + for (const auto& swap : swaps) { + auto it = this->interacting_uids_.find(swap.first); + // => swap.first is in interaction + if (it != this->interacting_uids_.end()) { + // find its pair + pair_first = Node(it->second); + } else { + // => not interacting, assign pair to self (will give lexicographic + // distance 0) + pair_first = swap.first; + } + // => UnitID in SWAP are interacting + if (pair_first == swap.second) { + continue; + } + auto jt = this->interacting_uids_.find(swap.second); + // => swap.second is in interaction + if (jt != this->interacting_uids_.end()) { + pair_second = Node(jt->second); + } else { + pair_second = swap.second; + } + // => UnitID in SWAP are interacting + // Check should alrady be done with earlier continue + TKET_ASSERT(pair_second != swap.first); + + const std::pair& curr_dists = + this->pair_distances(swap.first, pair_first, swap.second, pair_second); + const std::pair& news_dists = + this->pair_distances(swap.second, pair_first, swap.first, pair_second); + if (news_dists >= curr_dists) { + continue; + } + remaining_swaps.insert(swap); + } +} + +bool LexiRoute::solve_labelling() { + bool all_labelled = this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::No, CheckLabellingValidity::Yes); + if (!all_labelled) { + this->update_labelling(); + this->mapping_frontier_->update_linear_boundary_uids(this->labelling_); + return true; + } + return false; +} + +bool LexiRoute::solve(unsigned lookahead) { + // work out if valid + + bool all_labelled = this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::Yes, CheckLabellingValidity::No); + if (!all_labelled) { + return false; + } + + // store a copy of the original this->mapping_frontier_->quantum_boundray + // this object will be updated and reset throughout the swap picking procedure + // so need to return it to original setting at end + unit_vertport_frontier_t copy; + for (const std::pair& pair : + this->mapping_frontier_->linear_boundary->get()) { + copy.insert({pair.first, pair.second}); + } + swap_set_t candidate_swaps = this->get_candidate_swaps(); + this->remove_swaps_decreasing(candidate_swaps); + TKET_ASSERT(candidate_swaps.size() != 0); + // Only want to substitute a single swap + // check next layer of interacting qubits and remove swaps until only one + // lexicographically superior swap is left + unsigned counter = 0; + while (candidate_swaps.size() > 1 && counter < lookahead) { + // if 0, just take first swap rather than place + if (this->interacting_uids_.size() == 0) { + break; + } else { + interacting_nodes_t convert_uids; + for (const auto& p : this->interacting_uids_) { + convert_uids.insert( + {Node(this->labelling_[p.first]), + Node(this->labelling_[p.second])}); + } + LexicographicalComparison lookahead_lc(this->architecture_, convert_uids); + lookahead_lc.remove_swaps_lexicographical(candidate_swaps); + } + counter++; + this->mapping_frontier_->advance_next_2qb_slice(lookahead); + // true bool means it only sets interacting uids if both uids are in + // architecture + this->set_interacting_uids( + AssignedOnly::Yes, CheckRoutingValidity::No, + CheckLabellingValidity::No); + } + // find best swap + auto it = candidate_swaps.end(); + --it; + + std::pair chosen_swap = *it; + this->mapping_frontier_->set_linear_boundary(copy); + + 
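+  // The original frontier is restored above so that the interaction pattern
+  // can be recomputed from scratch; check_bridge then decides whether the
+  // chosen SWAP should instead be realised as a BRIDGE.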
this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::No, CheckLabellingValidity::No); + std::pair check = this->check_bridge(chosen_swap, lookahead); + // set for final time, to allow gates to be correctly inserted, but then leave + // as is + // insert gates + this->mapping_frontier_->set_linear_boundary(copy); + if (!check.first && !check.second) { + // update circuit with new swap + // final_labelling is initial labelling permuted by single swap + this->mapping_frontier_->add_swap(chosen_swap.first, chosen_swap.second); + } else { + // only need to reset in bridge case + this->set_interacting_uids( + AssignedOnly::No, CheckRoutingValidity::No, CheckLabellingValidity::No); + + auto add_ordered_bridge = [&](const Node& n) { + auto it0 = this->mapping_frontier_->linear_boundary->find(n); + // this should implicitly be the case if this logic is reached + TKET_ASSERT(it0 != this->mapping_frontier_->linear_boundary->end()); + + Node other_node = Node(this->interacting_uids_[n]); + auto it1 = this->mapping_frontier_->linear_boundary->find(other_node); + // this should implicitly be the case if this logic is reached + TKET_ASSERT(it1 != this->mapping_frontier_->linear_boundary->end()); + + auto path = this->architecture_->get_path(n, other_node); + Node central = Node(path[1]); + + Edge n_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + it0->second.first, it0->second.second); + Edge other_edge = this->mapping_frontier_->circuit_.get_nth_out_edge( + it1->second.first, it1->second.second); + + unsigned port0 = + this->mapping_frontier_->circuit_.get_target_port(n_edge); + unsigned port1 = + this->mapping_frontier_->circuit_.get_target_port(other_edge); + // compare port ordering to get control vs target + TKET_ASSERT(port0 != port1); + if (port0 < port1) { + this->mapping_frontier_->add_bridge(n, central, other_node); + } else { + this->mapping_frontier_->add_bridge(other_node, central, n); + } + }; + + if (check.first) { + add_ordered_bridge(chosen_swap.first); + } + if (check.second) { + add_ordered_bridge(chosen_swap.second); + } + } + return true; +} + +} // namespace tket diff --git a/tket/src/Mapping/LexiRouteRoutingMethod.cpp b/tket/src/Mapping/LexiRouteRoutingMethod.cpp new file mode 100644 index 0000000000..672ed670cd --- /dev/null +++ b/tket/src/Mapping/LexiRouteRoutingMethod.cpp @@ -0,0 +1,45 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
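For orientation, a rough usage sketch of how the method defined below might be combined with a labelling pass and handed to the `MappingManager` added later in this diff. It assumes the usual tket alias `RoutingMethodPtr` (a shared pointer to a routing method) and that the relevant headers are available; treat it as an illustration rather than a tested snippet:

#include <memory>
#include <vector>

#include "Mapping/LexiLabelling.hpp"
#include "Mapping/LexiRouteRoutingMethod.hpp"
#include "Mapping/MappingManager.hpp"

using namespace tket;

// Routes `circ` onto `arc`, labelling unplaced qubits first and then routing
// with the lexicographical SWAP/BRIDGE selection implemented below.
bool example_route(Circuit& circ, const ArchitecturePtr& arc) {
  MappingManager manager(arc);
  std::vector<RoutingMethodPtr> methods{
      std::make_shared<LexiLabellingMethod>(),
      std::make_shared<LexiRouteRoutingMethod>(10)};  // example lookahead depth
  return manager.route_circuit(circ, methods, /*label_isolated_qubits=*/true);
}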
+ +#include "Mapping/LexiRouteRoutingMethod.hpp" + +namespace tket { + +LexiRouteRoutingMethod::LexiRouteRoutingMethod(unsigned _max_depth) + : max_depth_(_max_depth){}; + +std::pair LexiRouteRoutingMethod::routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + LexiRoute lr(architecture, mapping_frontier); + return {lr.solve(this->max_depth_), {}}; +} + +unsigned LexiRouteRoutingMethod::get_max_depth() const { + return this->max_depth_; +} + +nlohmann::json LexiRouteRoutingMethod::serialize() const { + nlohmann::json j; + j["depth"] = this->get_max_depth(); + j["name"] = "LexiRouteRoutingMethod"; + return j; +} + +LexiRouteRoutingMethod LexiRouteRoutingMethod::deserialize( + const nlohmann::json& j) { + return LexiRouteRoutingMethod(j.at("depth").get()); +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/LexicographicalComparison.cpp b/tket/src/Mapping/LexicographicalComparison.cpp new file mode 100644 index 0000000000..4789349019 --- /dev/null +++ b/tket/src/Mapping/LexicographicalComparison.cpp @@ -0,0 +1,140 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Mapping/LexicographicalComparison.hpp" + +namespace tket { + +LexicographicalComparison::LexicographicalComparison( + const ArchitecturePtr& _architecture, + const interacting_nodes_t& _interacting_nodes) + : architecture_(_architecture), interacting_nodes_(_interacting_nodes) { + unsigned diameter = this->architecture_->get_diameter(); + + lexicographical_distances_t distance_vector(diameter, 0); + for (const auto& interaction : this->interacting_nodes_) { + // If Node not in architecture, don't add + if (!this->architecture_->node_exists(interaction.first) || + !this->architecture_->node_exists(interaction.second)) { + throw LexicographicalComparisonError( + "Constructor passed some interacting node not in architecture."); + } + // key->value already copied, assign reverse to map for later ease + this->interacting_nodes_[interaction.second] = interaction.first; + unsigned distance = this->architecture_->get_distance( + interaction.first, interaction.second); + if (distance > 0) { + ++distance_vector[diameter - distance]; + } + } + this->lexicographical_distances = distance_vector; +} + +void LexicographicalComparison::increment_distances( + lexicographical_distances_t& distances, + const std::pair& interaction, int increment) const { + const unsigned distances_index = + this->architecture_->get_diameter() - + this->architecture_->get_distance(interaction.first, interaction.second); + if (distances[distances_index] == 0 && increment < 0) { + throw LexicographicalComparisonError( + "Negative increment value is larger than value held at index, " + "modification not allowed."); + } + distances[distances_index] += increment; +} + +/** + * getter + */ +lexicographical_distances_t +LexicographicalComparison::get_lexicographical_distances() const { + return 
this->lexicographical_distances; +} + +/** + * get_updated_distances + * updates the "distance vector" (this->lexicographical_distances) to reflect + * the distance between interacting logical qubits given that the logical qubits + * present in "swap" have swapped physical qubits (Node) + */ +lexicographical_distances_t LexicographicalComparison::get_updated_distances( + const swap_t& swap) const { + // make a copy of base lexicographical distances + lexicographical_distances_t copy = this->lexicographical_distances; + if (swap.first == swap.second) { + return copy; + } + auto iq_it = this->interacting_nodes_.find(swap.first); + // first condition => first node not interacting with self, so update + // distances + if (iq_it != this->interacting_nodes_.end()) { + // update distances due to first swap node and qubit its interating with + // (assuming swap) + Node interacting = iq_it->second; + if (interacting != swap.second) { + increment_distances(copy, {swap.first, interacting}, -2); + // updates distances due to second swap node and qubit first is + // interacting with + increment_distances(copy, {swap.second, interacting}, 2); + } + } + iq_it = this->interacting_nodes_.find(swap.second); + // => second node not interacting with self + if (iq_it != this->interacting_nodes_.end()) { + Node interacting = iq_it->second; + if (interacting != swap.first) { + // update distances due to second node and qubit its interacting with + increment_distances(copy, {swap.second, interacting}, -2); + // update distannces due to frist node and qubit second node is + // interacting with + increment_distances(copy, {swap.first, interacting}, 2); + } + } + return copy; +} + +/** + * remove_swaps_lexicographical + * value x at index i of this->lexicographical_distancs => x logical qubits + * distance (diameter - i) away from the logical qubit they should be + * interacting with For each swap (swap_t) in "candidate_swaps" a + * new distances object is created given interacting_qubits Each distance for + * each swap is lexicographically compared If a distance is lexicographically + * larger than any other its corresponding swap is removed from candidate_swaps + * Therefore swaps remaining in candidate_swaps after this process are + * lexicographically identical for implied logical->physical qubit mapping and + * interacting logical + */ +void LexicographicalComparison::remove_swaps_lexicographical( + swap_set_t& candidate_swaps) const { + auto it = candidate_swaps.begin(); + lexicographical_distances_t winning_distances = + this->get_updated_distances(*it); + swap_set_t preserved_swaps = {*it}; + ++it; + for (; it != candidate_swaps.end(); ++it) { + lexicographical_distances_t comparison_distances = + this->get_updated_distances(*it); + + if (comparison_distances < winning_distances) { + preserved_swaps = {*it}; + winning_distances = comparison_distances; + } else if (comparison_distances == winning_distances) { + preserved_swaps.insert(*it); + } + } + candidate_swaps = preserved_swaps; +} +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/MappingFrontier.cpp b/tket/src/Mapping/MappingFrontier.cpp new file mode 100644 index 0000000000..037ea9d7db --- /dev/null +++ b/tket/src/Mapping/MappingFrontier.cpp @@ -0,0 +1,834 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Mapping/MappingFrontier.hpp" + +#include "Circuit/Circuit.hpp" +#include "Utils/UnitID.hpp" + +namespace tket { + +/** + * unit_vertport_frontier_t is , helper function returns + * UnitID corresponding to given VertPort + */ +UnitID get_unitid_from_unit_frontier( + const std::shared_ptr& u_frontier, + const VertPort& vp) { + auto it = u_frontier->get().find(vp); + if (it != u_frontier->get().end()) return it->first; + throw MappingFrontierError( + std::string("Edge provided not in unit_frontier_t object.")); +} + +/** + * bit_frontier_t is , helper function returns + * Bit corresponding to given Edge + */ +static Bit get_bit_from_bool_frontier( + const std::shared_ptr& b_frontier, const EdgeVec& ev) { + TKET_ASSERT(ev.size() > 0); + // condition is that if one Edge in EdgeVector ev is in a + // held bundle, then all are so return true + for (auto it = b_frontier->get().begin(); + it != b_frontier->get().end(); ++it) { + for (const Edge& e0 : ev) { + for (const Edge& e1 : it->second) { + if (e0 == e1) { + return it->first; + } + } + } + } + throw MappingFrontierError( + std::string("EdgeVec provided not in b_frontier_t object.")); +} + +std::shared_ptr frontier_convert_vertport_to_edge( + const Circuit& circuit, + const std::shared_ptr& u_frontier) { + // make empty unit_frontier_t object + std::shared_ptr output_frontier = + std::make_shared(); + // iterate through u_frontier, convert VertPort to Edge and insert + for (const std::pair& pair : u_frontier->get()) { + output_frontier->insert( + {pair.first, + circuit.get_nth_out_edge(pair.second.first, pair.second.second)}); + } + return output_frontier; +} + +/** + * Initialise linear_boundary and boolean_boundary from + * out edges of Input vertices + */ +MappingFrontier::MappingFrontier(Circuit& _circuit) : circuit_(_circuit) { + this->linear_boundary = std::make_shared(); + this->boolean_boundary = std::make_shared(); + this->bimaps_ = std::make_shared(); + + // Set up {UnitID, VertPort} objects for quantum and classical boundaries + for (const Qubit& qb : this->circuit_.all_qubits()) { + this->linear_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); + this->bimaps_->initial.insert({qb, qb}); + this->bimaps_->final.insert({qb, qb}); + } + for (const Bit& bit : this->circuit_.all_bits()) { + Vertex bit_input = this->circuit_.get_in(bit); + EdgeVec bool_bundle = this->circuit_.get_nth_b_out_bundle(bit_input, 0); + if (bool_bundle.size() != 0) { + this->boolean_boundary->insert({bit, bool_bundle}); + } + if (this->circuit_.n_out_edges_of_type(bit_input, EdgeType::Classical) > + 0) { + this->linear_boundary->insert({bit, {bit_input, 0}}); + } + } +} + +/** + * Initialise linear_boundary and boolean_boundary from + * out edges of Input vertices + */ +MappingFrontier::MappingFrontier( + Circuit& _circuit, std::shared_ptr _bimaps) + : circuit_(_circuit), bimaps_(_bimaps) { + // Check that the maps are valid + for (const Qubit& q : _circuit.all_qubits()) { + if (_bimaps->initial.right.find(q) == _bimaps->initial.right.end()) { + throw MappingFrontierError( + "Uid " + q.repr() + " not found in initial map."); + 
} + if (_bimaps->final.right.find(q) == _bimaps->final.right.end()) { + throw MappingFrontierError( + "Uid " + q.repr() + " not found in final map."); + } + } + + this->linear_boundary = std::make_shared(); + this->boolean_boundary = std::make_shared(); + + // Set up {UnitID, VertPort} objects for quantum and classical boundaries + for (const Qubit& qb : this->circuit_.all_qubits()) { + this->linear_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); + } + for (const Bit& bit : this->circuit_.all_bits()) { + Vertex bit_input = this->circuit_.get_in(bit); + EdgeVec bool_bundle = this->circuit_.get_nth_b_out_bundle(bit_input, 0); + if (bool_bundle.size() != 0) { + this->boolean_boundary->insert({bit, bool_bundle}); + } else { + this->linear_boundary->insert({bit, {bit_input, 0}}); + } + } +} + +MappingFrontier::MappingFrontier(const MappingFrontier& mapping_frontier) + : circuit_(mapping_frontier.circuit_), bimaps_(mapping_frontier.bimaps_) { + this->linear_boundary = std::make_shared(); + this->boolean_boundary = std::make_shared(); + + for (const std::pair& pair : + mapping_frontier.linear_boundary->get()) { + this->linear_boundary->insert({pair.first, pair.second}); + } + for (const std::pair& pair : + mapping_frontier.boolean_boundary->get()) { + EdgeVec edges; + for (const Edge& edge : pair.second) { + edges.push_back(edge); + } + this->boolean_boundary->insert({pair.first, edges}); + } + for (const Node& node : mapping_frontier.ancilla_nodes_) { + this->ancilla_nodes_.insert(node); + } +} + +void MappingFrontier::advance_next_2qb_slice(unsigned max_advance) { + bool boundary_updated = false; + unsigned loop = 0; + std::shared_ptr current_frontier = + frontier_convert_vertport_to_edge(this->circuit_, this->linear_boundary); + + // Get all vertices in first cut + VertexVec immediate_cut_vertices_v = + *(this->circuit_ + .next_cut(current_frontier, std::make_shared()) + .slice); + + do { + // each do section first finds the next set of edges after the held set + // for edges with target vertices with all their edges presented in the + // first set + loop++; + boundary_updated = false; + // produce next frontier object + std::shared_ptr next_frontier = + std::make_shared(); + + for (const std::pair& pair : + current_frontier->get()) { + // if target_v not in immediate_cut_vertices, then do not pass it + Vertex target_v = this->circuit_.target(pair.second); + EdgeVec in_edges = + this->circuit_.get_in_edges_of_type(target_v, EdgeType::Quantum); + + bool in_slice = + std::find( + immediate_cut_vertices_v.begin(), immediate_cut_vertices_v.end(), + target_v) != immediate_cut_vertices_v.end(); + OpType ot = this->circuit_.get_OpType_from_Vertex(target_v); + if (((!in_slice && in_edges.size() > 1) || ot == OpType::Output || + ot == OpType::ClOutput) && + this->circuit_.get_OpType_from_Vertex(target_v) != OpType::Barrier) { + // Vertex either not allowed to pass, or is output vertex => update + // nothing + next_frontier->insert({pair.first, pair.second}); + } else { + // vertex can be surpassed, so update linear_boundary and + // next_frontier with next edge + Edge next_edge = this->circuit_.get_next_edge(target_v, pair.second); + this->linear_boundary->replace( + this->linear_boundary->get().find(pair.first), + {pair.first, + {target_v, this->circuit_.get_source_port(next_edge)}}); + next_frontier->insert({pair.first, next_edge}); + } + } + // Given new frontier, find the actual next cut + CutFrontier next_cut = this->circuit_.next_cut( + next_frontier, std::make_shared()); + // For each 
vertex in a slice, if its physically permitted, update + // linear_boundary with quantum out edges from vertex (i.e. + // next_cut.u_frontier) + for (const Vertex& vert : *next_cut.slice) { + // Output means we don't want to pass, so just leave + OpType ot = this->circuit_.get_OpType_from_Vertex(vert); + if (ot == OpType::Output || ot == OpType::ClOutput) { + continue; + } + EdgeVec in_edges = + this->circuit_.get_in_edges_of_type(vert, EdgeType::Quantum); + // More than 1 edge means we want to keep edges, so continue + if (in_edges.size() > 1) { + continue; + } + // can guarantee that we update now as non-updating cases have been + // continued + boundary_updated = true; + // push edge past single qubit vertex, repeat + UnitID uid = get_unitid_from_unit_frontier( + this->linear_boundary, {this->circuit_.source(in_edges[0]), + this->circuit_.get_source_port(in_edges[0])}); + + Edge replacement_edge = + next_cut.u_frontier->get().find(uid)->second; + + Vertex source_vertex = this->circuit_.source(replacement_edge); + port_t source_port = this->circuit_.get_source_port(replacement_edge); + + this->linear_boundary->replace( + this->linear_boundary->get().find(uid), + {uid, {source_vertex, source_port}}); + } + current_frontier = next_frontier; + } while (boundary_updated && loop <= max_advance); + return; +} + +/** + * advance_frontier_boundary + * terminates when next_cut returns a "slice" where + * no vertices are physically permitted by the architecture + * linear_boundary and boolean_boundary updated to reflect this + */ +void MappingFrontier::advance_frontier_boundary( + const ArchitecturePtr& architecture) { + bool boundary_updated = false; + do { + // next_cut.slice vertices in_edges from this->linear_boundary + boundary_updated = false; + std::shared_ptr l_frontier_edges = + frontier_convert_vertport_to_edge( + this->circuit_, this->linear_boundary); + + CutFrontier next_cut = + this->circuit_.next_cut(l_frontier_edges, this->boolean_boundary); + // For each vertex in a slice, if its physically permitted, update + // linear_boundary with quantum out edges from vertex (i.e. + // next_cut.u_frontier) + // update boolean_boundary in line + for (const Vertex& vert : *next_cut.slice) { + // for each boolean edge into vertex, collect associated Bit and port + // number n.b. a single Bit may have multiple "in bundles" to different + // vertices in the same cut + std::map bool_uid_port_set; + std::vector b_in_bundles = this->circuit_.get_b_in_bundles(vert); + for (unsigned i = 0; i < b_in_bundles.size(); i++) { + EdgeVec ev = b_in_bundles[i]; + if (ev.size() > 0) { + bool_uid_port_set.insert( + {get_bit_from_bool_frontier(this->boolean_boundary, ev), i}); + } + } + + // for each quantum edge into vertex, collect associated Qubit/Node + // don't collect port as this->linear_boundary holds this + // each UnitID will only have one quantum edge active + std::vector l_uids; // linear unit id + std::vector nodes; // quantum/node only + for (const Edge& e : + this->circuit_.get_in_edges_of_type(vert, EdgeType::Quantum)) { + UnitID uid = get_unitid_from_unit_frontier( + this->linear_boundary, + {this->circuit_.source(e), this->circuit_.get_source_port(e)}); + l_uids.push_back(uid); + nodes.push_back(Node(uid)); + } + + // for each classical edge store related UnitID in l_uids + // each Bit will only have one classical edge active + // n.b. some vertices may introduce new Bit to the boolean_boundary + // e.g. 
A Measurement result may be passed to a conditional as + // therefore, all Bit not in the Boolean boundary are also stored + // in case the operation does this and the this->boolean_boundary + // needs to be updated + std::map extra_bool_uid_port_set; + for (const Edge& e : + this->circuit_.get_in_edges_of_type(vert, EdgeType::Classical)) { + // for updating linear boundary + port_t port_source = this->circuit_.get_source_port(e); + UnitID uid = get_unitid_from_unit_frontier( + this->linear_boundary, {this->circuit_.source(e), port_source}); + l_uids.push_back(uid); + + // for potentially adding new Bit to boolean boundary + // port_target makes it possible to track which "out bundle" corresponds + // to this Bit + port_t port_target = this->circuit_.get_target_port(e); + Bit bit = Bit(uid); + if (bool_uid_port_set.find(bit) == bool_uid_port_set.end()) { + extra_bool_uid_port_set.insert({bit, port_target}); + } + } + + if (nodes.size() == 0 || + this->valid_boundary_operation( + architecture, this->circuit_.get_Op_ptr_from_Vertex(vert), + nodes)) { + // if no valid operation, boundary not updated and while loop terminates + boundary_updated = true; + // update linear UnitID (Qubits&Quantum edges, Bits&Classical edges) + for (const UnitID& uid : l_uids) { + Edge replacement_edge = + next_cut.u_frontier->get().find(uid)->second; + Vertex source_vertex = this->circuit_.source(replacement_edge); + port_t source_port = this->circuit_.get_source_port(replacement_edge); + this->linear_boundary->replace( + this->linear_boundary->get().find(uid), + {uid, {source_vertex, source_port}}); + } + // update booleans + // n.b. its possible a boolean path terminates with an operation + // however, the port should be preserved so we can track the correct Bit + // {Bit, port_t} + for (auto it = bool_uid_port_set.begin(); it != bool_uid_port_set.end(); + ++it) { + std::vector out_bundles = + this->circuit_.get_b_out_bundles(vert); + + port_t port = it->second; + TKET_ASSERT(out_bundles.size() > port); // safe port indexing + // However, this Bit may have boolean values in other Vertices in + // slice therefore, we remove every edge from the vertex in_bundle for + // this port from the boolean_boundary and then insert these new edges + std::vector in_bundles = + this->circuit_.get_b_in_bundles(vert); + + TKET_ASSERT(in_bundles.size() > port); // safe port indexing + EdgeVec in_bundle = in_bundles[port]; + // Bit should be in boolean_boundary + Bit bit = it->first; + auto jt = this->boolean_boundary->get().find(bit); + TKET_ASSERT(jt != this->boolean_boundary->get().end()); + // construct a new EdgeVec object with replaced Edge and persisting + // Edge + + EdgeVec new_boolean_edges; + + for (const Edge& e : jt->second) { + // => edge isn't being replaced + if (std::find(in_bundle.begin(), in_bundle.end(), e) == + in_bundle.end()) { + new_boolean_edges.push_back(e); + } + } + // add all new edges from out bundle to the boundary + for (const Edge& e : out_bundles[port]) { + new_boolean_edges.push_back(e); + } + + // boolean no longer needed + if (new_boolean_edges.size() == 0) { + this->boolean_boundary->erase(jt); + } else { + // replace boolean boundary + this->boolean_boundary->replace(jt, {bit, new_boolean_edges}); + } + } + // Some operations may spawn a Boolean wire not held in boolean_boundary + // this checks for any new wires and if true, adds to boolean_boundary + // {Bit, port_t} + for (auto it = extra_bool_uid_port_set.begin(); + it != extra_bool_uid_port_set.end(); ++it) { + std::vector source_out = 
+ this->circuit_.get_b_out_bundles(vert); + // If source_out has more bundles than port value, then we know + // it's been spawned (multiple could be spawned at same vertex) + port_t port = it->second; + if (source_out.size() > port) { + EdgeVec new_boolean_wire = source_out[port]; + // add new edges to boolean_boundary + // note that a boolean cannot be spawned in multiple vertices + // as the incoming Bit wire is linear + // Measure always create a boolean, even if empty of edges + // => check size before adding + if (new_boolean_wire.size() > 0) { + this->boolean_boundary->insert({it->first, new_boolean_wire}); + } + } + } + } + } + } while (boundary_updated); + return; +} + +EdgeVec convert_u_frontier_to_edges(const unit_frontier_t& u_frontier) { + EdgeVec edges; + for (const std::pair& pair : u_frontier.get()) { + edges.push_back(pair.second); + } + return edges; +} + +Subcircuit MappingFrontier::get_frontier_subcircuit( + unsigned _max_subcircuit_depth, unsigned _max_subcircuit_size) const { + CutFrontier current_cut = this->circuit_.next_cut( + frontier_convert_vertport_to_edge(this->circuit_, this->linear_boundary), + this->boolean_boundary); + + unsigned subcircuit_depth = 1; + VertexSet subcircuit_vertices( + current_cut.slice->begin(), current_cut.slice->end()); + // add cuts of vertices to subcircuit_vertices until constraints met, or end + // of circuit reached + while (subcircuit_depth < _max_subcircuit_depth && + unsigned(subcircuit_vertices.size()) < _max_subcircuit_size && + current_cut.slice->size() > 0) { + current_cut = + this->circuit_.next_cut(current_cut.u_frontier, current_cut.b_frontier); + subcircuit_depth++; + subcircuit_vertices.insert( + current_cut.slice->begin(), current_cut.slice->end()); + } + TKET_ASSERT(subcircuit_vertices.size() != 0); + return Subcircuit( + convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( + this->circuit_, this->linear_boundary)), + convert_u_frontier_to_edges(*current_cut.u_frontier), + subcircuit_vertices); +} + +UnitID MappingFrontier::get_qubit_from_circuit_uid(const UnitID& uid) { + auto it = this->bimaps_->initial.right.find(uid); + if (it == this->bimaps_->initial.right.end()) { + throw MappingFrontierError("UnitID not found in initial map."); + } + return it->second; +} + +void MappingFrontier::update_bimaps(UnitID qubit, UnitID node) { + // Update initial map + auto init_it = this->bimaps_->initial.left.find(qubit); + if (init_it == this->bimaps_->initial.left.end()) + throw MappingFrontierError("Qubit not found in initial map."); + this->bimaps_->initial.left.erase(init_it); + this->bimaps_->initial.left.insert({qubit, node}); + // Update final map + auto final_it = this->bimaps_->final.left.find(qubit); + if (final_it == this->bimaps_->final.left.end()) + throw MappingFrontierError("Qubit not found in final map."); + this->bimaps_->final.left.erase(final_it); + this->bimaps_->final.left.insert({qubit, node}); +} + +void MappingFrontier::update_linear_boundary_uids( + const unit_map_t& relabelled_uids) { + for (const std::pair& label : relabelled_uids) { + // implies new labelling + if (label.first != label.second) { + // by type, label.first already assumed in circuit + // this condition means label.second also in circuit + // implies that a merging is done -> remove first qubit + + if (this->linear_boundary->get().find(label.second) != + this->linear_boundary->get().end()) { + // erase, assume updated already + this->linear_boundary->erase(label.first); + } else { + auto current_label_it = + 
this->linear_boundary->get().find(label.first); + // relabel "label.first" with "label.second" + this->linear_boundary->replace( + current_label_it, {label.second, current_label_it->second}); + unit_map_t relabel = {label}; + this->circuit_.rename_units(relabel); + } + } + } +} + +void MappingFrontier::permute_subcircuit_q_out_hole( + const unit_map_t& final_permutation, Subcircuit& subcircuit) { + EdgeVec new_q_out_hole; + int i = 0; + // Change to iterate through final permutation first? + if (this->linear_boundary->size() != final_permutation.size()) { + throw MappingFrontierError( + "Number of Qubits in mapping permutation does not match number of " + "Qubits in MappingFrontier boundary, for permuting Qubits as with " + "routed Subcircuit."); + } + for (const std::pair& pair : + this->linear_boundary->get()) { + auto it = final_permutation.find(pair.first); + if (it == final_permutation.end()) { + throw MappingFrontierError("Qubit in boundary not in permutation."); + } + std::pair uid_pair = *it; + if (uid_pair.first == uid_pair.second) { + new_q_out_hole.push_back(subcircuit.q_out_hole[i]); + } else { + int j = 0; + for (auto it = this->linear_boundary->get().begin(); + it != this->linear_boundary->get().end(); ++it) { + if (it->first == uid_pair.second) { + new_q_out_hole.push_back(subcircuit.q_out_hole[j]); + break; + } + j++; + } + } + i++; + } + subcircuit.q_out_hole = new_q_out_hole; +} + +/** + * MappingFrontier::get_u_frontier_default_unit_map + * Map from default qubit register qubits to UnitIDs in linear_boundary + */ +unit_map_t MappingFrontier::get_default_to_linear_boundary_unit_map() const { + unsigned i = 0; + unit_map_t default_to_u_frontier_map; + for (const std::pair& pair : + this->linear_boundary->get()) { + default_to_u_frontier_map.insert({Qubit(i), pair.first}); + i++; + } + return default_to_u_frontier_map; +} + +void MappingFrontier::set_linear_boundary( + const unit_vertport_frontier_t& new_boundary) { + this->linear_boundary = std::make_shared(); + for (const std::pair& pair : new_boundary.get()) { + this->linear_boundary->insert(pair); + } +} + +/** + * add_swap + * Inserts an OpType::SWAP gate into the uid_0 and uid_1 edges held in + * linear_boundary This directly modifies circuit_ Updates linear_boundary to + * reflect new edges + */ +void MappingFrontier::add_swap(const UnitID& uid_0, const UnitID& uid_1) { + // get iterators to linear_boundary uids + auto uid0_in_it = this->linear_boundary->find(uid_0); + auto uid1_in_it = this->linear_boundary->find(uid_1); + + // Add Qubit if not in MappingFrontier boundary (i.e. 
not in circuit) + if (uid0_in_it == this->linear_boundary->end()) { + this->add_ancilla(uid_0); + uid0_in_it = this->linear_boundary->find(uid_0); + } + if (uid1_in_it == this->linear_boundary->end()) { + this->add_ancilla(uid_1); + uid1_in_it = this->linear_boundary->find(uid_1); + } + + // update held ancillas + // the location/id of the "ancilla node" changes when a SWAP occurs + Node n0 = Node(uid_0); + Node n1 = Node(uid_1); + bool uid0_ancilla = + this->ancilla_nodes_.find(n0) != this->ancilla_nodes_.end(); + bool uid1_ancilla = + this->ancilla_nodes_.find(n1) != this->ancilla_nodes_.end(); + + if (uid0_ancilla && !uid1_ancilla) { + this->ancilla_nodes_.erase(n0); + this->ancilla_nodes_.insert(n1); + } + if (!uid0_ancilla && uid1_ancilla) { + this->ancilla_nodes_.erase(n1); + this->ancilla_nodes_.insert(n0); + } + + // Get predecessor edges to SWAP insert location + VertPort vp0 = uid0_in_it->second; + VertPort vp1 = uid1_in_it->second; + EdgeVec predecessors = { + this->circuit_.get_nth_out_edge(vp0.first, vp0.second), + this->circuit_.get_nth_out_edge(vp1.first, vp1.second)}; + + // add SWAP vertex to circuit_ and rewire into predecessor + Vertex swap_v = this->circuit_.add_vertex(OpType::SWAP); + this->circuit_.rewire( + swap_v, predecessors, {EdgeType::Quantum, EdgeType::Quantum}); + + // Update boundary to reflect new edges + EdgeVec successors = this->circuit_.get_all_out_edges(swap_v); + this->circuit_.dag[successors[0]].ports.first = 1; + this->circuit_.dag[successors[1]].ports.first = 0; + + this->linear_boundary->replace( + uid0_in_it, {uid_0, {this->circuit_.source(successors[1]), 0}}); + this->linear_boundary->replace( + uid1_in_it, {uid_1, {this->circuit_.source(successors[0]), 1}}); + + // update output vertices of quantum boundary of circuit to reflect changing + // qubit paths + auto uid0_circuit_boundary_it = + this->circuit_.boundary.get().find(uid_0); + auto uid1_circuit_boundary_it = + this->circuit_.boundary.get().find(uid_1); + + Vertex uid0_out = uid0_circuit_boundary_it->out_; + Vertex uid1_out = uid1_circuit_boundary_it->out_; + Vertex uid0_in = uid0_circuit_boundary_it->in_; + Vertex uid1_in = uid1_circuit_boundary_it->in_; + + this->circuit_.boundary.get().erase(uid_0); + this->circuit_.boundary.get().erase(uid_1); + + this->circuit_.boundary.get().insert({uid_0, uid0_in, uid1_out}); + this->circuit_.boundary.get().insert({uid_1, uid1_in, uid0_out}); + + std::map final_map = {{n0, n1}, {n1, n0}}; + + update_maps(this->bimaps_, {}, final_map); +} + +void MappingFrontier::add_bridge( + const UnitID& control, const UnitID& central, const UnitID& target) { + // get predecessors + auto control_in_it = this->linear_boundary->find(control); + auto central_in_it = this->linear_boundary->find(central); + auto target_in_it = this->linear_boundary->find(target); + + // by virtue of method, control and target qubit will always be in BRIDGE. + // However, distances used to check BRIDGE and find PATH may use + // central qubit that is unallocated, in which add it. 
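+  // A BRIDGE realises the CX between `control` and `target` through the
+  // intermediate `central` qubit, using only the control-central and
+  // central-target architecture edges; unlike add_swap it leaves the
+  // logical-to-physical assignment unchanged.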
+ if (central_in_it == this->linear_boundary->end()) { + this->add_ancilla(central); + central_in_it = this->linear_boundary->find(central); + } + + VertPort vp_control = control_in_it->second; + VertPort vp_central = central_in_it->second; + VertPort vp_target = target_in_it->second; + + EdgeVec predecessors = { + this->circuit_.get_nth_out_edge(vp_control.first, vp_control.second), + this->circuit_.get_nth_out_edge(vp_central.first, vp_central.second), + this->circuit_.get_nth_out_edge(vp_target.first, vp_target.second), + }; // get cx vertex + // this should be guaranteeds by pre-checks + Vertex cx_v = this->circuit_.target(predecessors[0]); + // add bridge + Vertex bridge_v = this->circuit_.add_vertex(OpType::BRIDGE); + // add bridge vertex to circuit + this->circuit_.rewire( + bridge_v, predecessors, + {EdgeType::Quantum, EdgeType::Quantum, EdgeType::Quantum}); + // remove old cx vertex + this->circuit_.remove_vertex( + cx_v, Circuit::GraphRewiring::Yes, Circuit::VertexDeletion::Yes); +} + +void MappingFrontier::add_ancilla(const UnitID& ancilla) { + Qubit qb(ancilla); + this->circuit_.add_qubit(qb); + this->linear_boundary->insert({qb, {this->circuit_.get_in(qb), 0}}); + + this->bimaps_->initial.insert({qb, qb}); + this->bimaps_->final.insert({qb, qb}); + this->ancilla_nodes_.insert(Node(ancilla)); + UnitID uid_ancilla(ancilla); + + unit_map_t update_map; + update_map.insert({uid_ancilla, uid_ancilla}); + + update_maps(this->bimaps_, update_map, update_map); +} + +void MappingFrontier::merge_ancilla( + const UnitID& merge, const UnitID& ancilla) { + // get output and input vertices + Vertex merge_v_in = this->circuit_.get_in(merge); + Vertex merge_v_out = this->circuit_.get_out(merge); + Vertex ancilla_v_out = this->circuit_.get_out(ancilla); + // find source vertex & port of merge_v_out + // output vertex, so can assume single edge + Edge merge_out_edge = this->circuit_.get_nth_out_edge(merge_v_in, 0); + Edge ancilla_in_edge = this->circuit_.get_nth_in_edge(ancilla_v_out, 0); + // Find port number + port_t merge_target_port = this->circuit_.get_target_port(merge_out_edge); + port_t ancilla_source_port = this->circuit_.get_source_port(ancilla_in_edge); + // Find vertices + Vertex merge_v_target = this->circuit_.target(merge_out_edge); + Vertex ancilla_v_source = this->circuit_.source(ancilla_in_edge); + + // remove and replace edges + this->circuit_.remove_edge(merge_out_edge); + this->circuit_.remove_edge(ancilla_in_edge); + this->circuit_.add_edge( + {ancilla_v_source, ancilla_source_port}, + {merge_v_target, merge_target_port}, EdgeType::Quantum); + + // instead of manually updating all boundaries, we change which output + // vertex the qubit paths to + Edge merge_in_edge = this->circuit_.get_nth_in_edge(merge_v_out, 0); + port_t merge_source_port = this->circuit_.get_source_port(merge_in_edge); + Vertex merge_v_source = this->circuit_.source(merge_in_edge); + + this->circuit_.remove_edge(merge_in_edge); + this->circuit_.add_edge( + {merge_v_source, merge_source_port}, {ancilla_v_out, 0}, + EdgeType::Quantum); + + // remove empty vertex wire, relabel dag vertices + this->circuit_.dag[merge_v_in].op = get_op_ptr(OpType::noop); + this->circuit_.dag[merge_v_out].op = get_op_ptr(OpType::noop); + this->circuit_.remove_vertex( + merge_v_in, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + this->circuit_.remove_vertex( + merge_v_out, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); + + // Can now just erase "merge" qubit from the circuit + 
this->circuit_.boundary.get<TagID>().erase(merge); + + // Update the qubit mappings + // let's call the arguments ancilla_node and merge_node + // e.g. before merge: + // initial := {ancilla_q:node_x, merge_q:some_uid} + // final := {ancilla_q:ancilla_node, merge_q:merge_node} + // e.g. after merge: + // initial := {merge_q:node_x} + // final := {merge_q:ancilla_node} + // Basically, in both qubit maps, erase the entry with qubit merge_q + // then replace the entry ancilla_q -> x with merge_q -> x + + auto merge_it = this->bimaps_->initial.right.find(merge); + TKET_ASSERT(merge_it != this->bimaps_->initial.right.end()); + UnitID merge_q = merge_it->second; + this->bimaps_->initial.right.erase(merge_it); + this->bimaps_->final.left.erase(merge_q); + // Find ancilla_q + auto final_it = this->bimaps_->final.right.find(ancilla); + UnitID ancilla_q = final_it->second; + // Replace in final map + this->bimaps_->final.right.erase(final_it); + this->bimaps_->final.left.insert({merge_q, ancilla}); + // Replace in initial map + auto init_it = this->bimaps_->initial.left.find(ancilla_q); + UnitID init_ancilla_node = init_it->second; + this->bimaps_->initial.left.erase(init_it); + this->bimaps_->initial.left.insert({merge_q, init_ancilla_node}); +} + +bool MappingFrontier::valid_boundary_operation( + const ArchitecturePtr& architecture, const Op_ptr& op, + const std::vector<Node>& uids) const { + // boxes are never allowed + OpType ot = op->get_type(); + if (is_box_type(ot)) { + return false; + } + + if (ot == OpType::Conditional) { + Op_ptr cond_op_ptr = static_cast<const Conditional&>(*op).get_op(); + // conditional boxes are never allowed, too + OpType ot = cond_op_ptr->get_type(); + while (ot == OpType::Conditional) { + cond_op_ptr = static_cast<const Conditional&>(*cond_op_ptr).get_op(); + ot = cond_op_ptr->get_type(); + if (is_box_type(ot)) { + return false; + } + } + } + + // Barriers are always allowed + if (ot == OpType::Barrier) { + return true; + } + + // this currently allows unplaced single qubit gates + // this should be changed in the future + if (uids.size() == 1) { + return true; + } + + // allow two qubit gates only for placed and connected nodes + if (uids.size() == 2) { + bool n0 = architecture->node_exists(uids[0]); + bool n1 = architecture->node_exists(uids[1]); + if (n0 && n1) { + bool bde = architecture->bidirectional_edge_exists(uids[0], uids[1]); + if (bde) { + return true; + } + } + } else if (uids.size() == 3 && ot == OpType::BRIDGE) { + bool con_0_exists = + architecture->bidirectional_edge_exists(uids[0], uids[1]); + bool con_1_exists = + architecture->bidirectional_edge_exists(uids[2], uids[1]); + if (architecture->node_exists(uids[0]) && + architecture->node_exists(uids[1]) && + architecture->node_exists(uids[2]) && con_0_exists && con_1_exists) { + return true; + } + } + + return false; +} + +} // namespace tket diff --git a/tket/src/Mapping/MappingManager.cpp b/tket/src/Mapping/MappingManager.cpp new file mode 100644 index 0000000000..2d5de2fca1 --- /dev/null +++ b/tket/src/Mapping/MappingManager.cpp @@ -0,0 +1,157 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Mapping/MappingManager.hpp" + +#include "Architecture/BestTsaWithArch.hpp" + +namespace tket { + +MappingManager::MappingManager(const ArchitecturePtr& _architecture) + : architecture_(_architecture) {} + +bool MappingManager::route_circuit( + Circuit& circuit, const std::vector<RoutingMethodPtr>& routing_methods, + bool label_isolated_qubits) const { + return this->route_circuit_with_maps( + circuit, routing_methods, std::make_shared<unit_bimaps_t>(), + label_isolated_qubits); +} + +bool MappingManager::route_circuit_with_maps( + Circuit& circuit, const std::vector<RoutingMethodPtr>& routing_methods, + std::shared_ptr<unit_bimaps_t> maps, bool label_isolated_qubits) const { + if (circuit.n_qubits() > this->architecture_->n_nodes()) { + std::string error_string = + "Circuit has " + std::to_string(circuit.n_qubits()) + + " logical qubits. Architecture has " + + std::to_string(this->architecture_->n_nodes()) + + " physical qubits. Circuit to be routed cannot have more " + "qubits than the Architecture."; + throw MappingManagerError(error_string); + } + + // mapping_frontier tracks boundary between routed & un-routed in circuit + // when initialised, boundary is over output edges of input vertices + MappingFrontier_ptr mapping_frontier; + if (!maps->initial.empty() && !maps->final.empty()) { + mapping_frontier = std::make_shared<MappingFrontier>(circuit, maps); + } else { + mapping_frontier = std::make_shared<MappingFrontier>(circuit); + } + // updates routed/un-routed boundary + + mapping_frontier->advance_frontier_boundary(this->architecture_); + + auto check_finish = [&mapping_frontier]() { + for (const std::pair<UnitID, VertPort>& pair : + mapping_frontier->linear_boundary->get()) { + Edge e = mapping_frontier->circuit_.get_nth_out_edge( + pair.second.first, pair.second.second); + Vertex v = mapping_frontier->circuit_.target(e); + OpType ot = mapping_frontier->circuit_.get_OpType_from_Vertex(v); + if (!is_final_q_type(ot) && ot != OpType::ClOutput) { + return false; + } + } + return true; + }; + + bool circuit_modified = !check_finish(); + while (!check_finish()) { + // The order in which methods are passed in the std::vector is + // the order they are run. + // If a method performs better but only on specific subcircuits, + // rank it earlier in the passed vector. + bool valid_methods = false; + for (const auto& rm : routing_methods) { + // true => can use held routing method + std::pair<bool, unit_map_t> bool_map = + rm->routing_method(mapping_frontier, this->architecture_); + if (bool_map.first) { + valid_methods = true; + if (bool_map.second.size() > 0) { + std::map<Node, Node> node_map; + for (const auto& x : bool_map.second) { + node_map.insert({Node(x.first), Node(x.second)}); + } + for (const std::pair<Node, Node>& swap : + BestTsaWithArch::get_swaps(*this->architecture_, node_map)) { + mapping_frontier->add_swap(swap.first, swap.second); + } + } + break; + } + } + if (!valid_methods) { + throw MappingManagerError( + "No RoutingMethod suitable to map given subcircuit."); + } + // find next routed/unrouted boundary given updates + mapping_frontier->advance_frontier_boundary(this->architecture_); + } + + // check all nodes placed + + bool found_unplaced_qubit = false; + + // search for an unplaced qubit first, to speed up the runtime + for (Qubit q : mapping_frontier->circuit_.all_qubits()) { + if (!this->architecture_->node_exists(Node(q))) { + found_unplaced_qubit = true; + break; + } + } + + if (found_unplaced_qubit && label_isolated_qubits) { + circuit_modified = true; + qubit_vector_t q_vec = mapping_frontier->circuit_.all_qubits(); +
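+ // The block below first records which architecture nodes are already used + // by placed qubits, then assigns each remaining unplaced qubit to the first + // unused node, updating the bimaps and linear boundary to match.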
unit_map_t qubit_to_nodes_place; + node_set_t node_set_placed; + + for (Qubit q : q_vec) { + if (this->architecture_->node_exists(Node(q))) { + qubit_to_nodes_place.insert({q, Node(q)}); + node_set_placed.insert(Node(q)); + } + } + + node_vector_t nodes_vec = this->architecture_->get_all_nodes_vec(); + + // place all unplaced qubits + + for (Qubit q : q_vec) { + if (!this->architecture_->node_exists(Node(q))) { + // found unplaced qubit + + unsigned index_to_use = 0; + while (node_set_placed.find(nodes_vec[index_to_use]) != + node_set_placed.end()) { + ++index_to_use; + } + qubit_to_nodes_place.insert({q, nodes_vec[index_to_use]}); + node_set_placed.insert(nodes_vec[index_to_use]); + mapping_frontier->update_bimaps( + mapping_frontier->get_qubit_from_circuit_uid(q), + nodes_vec[index_to_use]); + } + } + + mapping_frontier->update_linear_boundary_uids(qubit_to_nodes_place); + mapping_frontier->circuit_.rename_units(qubit_to_nodes_place); + } + + return circuit_modified; +} +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/MultiGateReorder.cpp b/tket/src/Mapping/MultiGateReorder.cpp new file mode 100644 index 0000000000..6e93c34488 --- /dev/null +++ b/tket/src/Mapping/MultiGateReorder.cpp @@ -0,0 +1,269 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "Mapping/MultiGateReorder.hpp" + +#include "Mapping/MappingFrontier.hpp" + +namespace tket { + +MultiGateReorder::MultiGateReorder( + const ArchitecturePtr &_architecture, + MappingFrontier_ptr &_mapping_frontier) + : architecture_(_architecture), mapping_frontier_(_mapping_frontier) { + // This needs to be updated every time the frontier changes + this->u_frontier_edges_ = + convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( + _mapping_frontier->circuit_, _mapping_frontier->linear_boundary)); +} + +// Traverse the DAG to the quantum frontier +// to find the UnitID associated with an VertPort +static UnitID get_unitid_from_vertex_port( + const MappingFrontier_ptr &frontier, const VertPort &vert_port) { + VertPort current_vert_port = vert_port; + while (true) { + auto it = + frontier->linear_boundary->get().find(current_vert_port); + if (it != frontier->linear_boundary->get().end()) { + return it->first; + } + Edge current_e = frontier->circuit_.get_nth_out_edge( + current_vert_port.first, current_vert_port.second); + Vertex prev_vert; + Edge prev_e; + std::tie(prev_vert, prev_e) = + frontier->circuit_.get_prev_pair(current_vert_port.first, current_e); + current_vert_port = {prev_vert, frontier->circuit_.get_source_port(prev_e)}; + } +} + +static bool is_multiq_quantum_gate(const Circuit &circ, const Vertex &vert) { + Op_ptr op = circ.get_Op_ptr_from_Vertex(vert); + return ( + op->get_desc().is_gate() && circ.n_in_edges(vert) > 1 && + circ.n_in_edges_of_type(vert, EdgeType::Quantum) == + circ.n_in_edges(vert) && + circ.n_out_edges_of_type(vert, EdgeType::Quantum) == + circ.n_out_edges(vert)); +} + +static bool is_physically_permitted( + const MappingFrontier_ptr &frontier, const ArchitecturePtr &arc_ptr, + const Vertex &vert) { + std::vector nodes; + for (port_t port = 0; port < frontier->circuit_.n_ports(vert); ++port) { + nodes.push_back(Node(get_unitid_from_vertex_port(frontier, {vert, port}))); + } + return frontier->valid_boundary_operation( + arc_ptr, frontier->circuit_.get_Op_ptr_from_Vertex(vert), nodes); +} + +// This method will try to commute a vertex to the quantum frontier +static std::optional> try_find_commute_edges( + const Circuit &circ, const EdgeVec &frontier_edges, const Vertex &vert) { + // Initialize to be the in_edges for the given vertex + EdgeVec current_edges = circ.get_in_edges(vert); + EdgeVec initial_edges(current_edges.begin(), current_edges.end()); + + Op_ptr current_op = circ.get_Op_ptr_from_Vertex(vert); + // Record the colour of each port of the vertex. 
+ std::vector> colours; + for (const Edge &edge : current_edges) { + port_t target_port = circ.get_target_port(edge); + std::optional colour = current_op->commuting_basis(target_port); + colours.push_back(colour); + } + // Stores all edges which the vertex can be commuted to + EdgeVec dest_edges; + while (true) { + // The vertex can be commuted to the front + bool success = true; + for (unsigned i = 0; i < current_edges.size(); ++i) { + // Check if the edge is already in the quantum frontier + if (std::find( + frontier_edges.begin(), frontier_edges.end(), current_edges[i]) != + frontier_edges.end()) { + dest_edges.push_back(current_edges[i]); + continue; + } + // Check prev_op is a gate + Vertex prev_vert = circ.source(current_edges[i]); + Op_ptr prev_op = circ.get_Op_ptr_from_Vertex(prev_vert); + if (!prev_op->get_desc().is_gate()) { + // not commute + return std::nullopt; + } + + // Check commute + port_t source_port = circ.get_source_port(current_edges[i]); + if (!prev_op->commutes_with_basis(colours[i], source_port)) { + // not commute + return std::nullopt; + } else { + // Update dest_edges + Vertex prev_prev_v; + Edge prev_e; + std::tie(prev_prev_v, prev_e) = + circ.get_prev_pair(prev_vert, current_edges[i]); + dest_edges.push_back(prev_e); + } + // Only true if all edges are in frontier + success = false; + } + if (success) { + std::pair p(initial_edges, dest_edges); + return p; + } else { + current_edges = dest_edges; + dest_edges = {}; + } + } +} + +static void partial_rewire( + const Vertex &vert, Circuit &circ, EdgeVec &src_edges, + EdgeVec &dest_edges) { + // move the vertex to the frontier + // Notice that if one of the vertex's in edge is already a destination + // edge then the circuit::remove_vertex will delete the destination edge + // hence circuit::rewire would result in an error due to the missing edge. + // We need a partial rewire for that reason. + // Example: + // Moving the second vertex (CX gate) to the front we only need to rewire + // the "x" part. + // --o----- + // | + // --x--x-- + // | + // -----o-- + + for (unsigned i = 0; i < dest_edges.size(); i++) { + Edge &dest_in_edge = dest_edges[i]; + Edge &curr_in_edge = src_edges[i]; + // If the vertex is already connected to an edge in the frontier, do + // nothing. + if (dest_in_edge != curr_in_edge) { + // Add first edge + Vertex dest_prev_vert = circ.source(dest_in_edge); + circ.add_edge( + {dest_prev_vert, circ.get_source_port(dest_in_edge)}, + {vert, circ.get_target_port(curr_in_edge)}, EdgeType::Quantum); + // Add second edge + Vertex curr_next_vert; + Edge curr_out_edge; + Vertex dest_next_vert = circ.target(dest_in_edge); + std::tie(curr_next_vert, curr_out_edge) = + circ.get_next_pair(vert, curr_in_edge); + circ.add_edge( + {vert, circ.get_source_port(curr_out_edge)}, + {dest_next_vert, circ.get_target_port(dest_in_edge)}, + EdgeType::Quantum); + // Add third edge + Vertex curr_prev_vert = circ.source(curr_in_edge); + circ.add_edge( + {curr_prev_vert, circ.get_source_port(curr_in_edge)}, + {curr_next_vert, circ.get_target_port(curr_out_edge)}, + EdgeType::Quantum); + // Remove edges + circ.remove_edge(dest_in_edge); + circ.remove_edge(curr_in_edge); + circ.remove_edge(curr_out_edge); + } + } +} + +bool MultiGateReorder::solve(unsigned max_depth, unsigned max_size) { + // Assume the frontier has been advanced + + // store a copy of the original this->mapping_frontier_->quantum_boundray + // this object will be updated and reset throughout the procedure + // so need to return it to original setting at end. 
+ unit_vertport_frontier_t copy; + for (const std::pair &pair : + this->mapping_frontier_->linear_boundary->get()) { + copy.insert({pair.first, pair.second}); + } + // Get a subcircuit only for iterating vertices + Subcircuit circ = + this->mapping_frontier_->get_frontier_subcircuit(max_depth, max_size); + + // for return value + bool modification_made = false; + // since we assume that the frontier has been advanced + // we are certain that any multi-q vert lies after the frontier + for (const Vertex &vert : circ.verts) { + // Check if the vertex is: + // 1. physically permitted + // 2. is a multi qubit quantum operation without classical controls + if (is_multiq_quantum_gate(this->mapping_frontier_->circuit_, vert) && + is_physically_permitted( + this->mapping_frontier_, this->architecture_, vert)) { + std::optional> commute_pairs = + try_find_commute_edges( + this->mapping_frontier_->circuit_, this->u_frontier_edges_, vert); + + if (commute_pairs != std::nullopt) { + modification_made = true; + partial_rewire( + vert, this->mapping_frontier_->circuit_, (*commute_pairs).first, + (*commute_pairs).second); + // Update the frontier + this->mapping_frontier_->advance_frontier_boundary(this->architecture_); + this->u_frontier_edges_ = + convert_u_frontier_to_edges(*frontier_convert_vertport_to_edge( + this->mapping_frontier_->circuit_, + this->mapping_frontier_->linear_boundary)); + } + } + } + // Return the quantum boundary to its original setting + this->mapping_frontier_->set_linear_boundary(copy); + return modification_made; +} + +MultiGateReorderRoutingMethod::MultiGateReorderRoutingMethod( + unsigned _max_depth, unsigned _max_size) + : max_depth_(_max_depth), max_size_(_max_size) {} + +std::pair MultiGateReorderRoutingMethod::routing_method( + MappingFrontier_ptr &mapping_frontier, + const ArchitecturePtr &architecture) const { + MultiGateReorder mr(architecture, mapping_frontier); + return {mr.solve(this->max_depth_, this->max_size_), {}}; +} + +unsigned MultiGateReorderRoutingMethod::get_max_depth() const { + return this->max_depth_; +} + +unsigned MultiGateReorderRoutingMethod::get_max_size() const { + return this->max_size_; +} + +nlohmann::json MultiGateReorderRoutingMethod::serialize() const { + nlohmann::json j; + j["depth"] = this->max_depth_; + j["size"] = this->max_size_; + j["name"] = "MultiGateReorderRoutingMethod"; + return j; +} + +MultiGateReorderRoutingMethod MultiGateReorderRoutingMethod::deserialize( + const nlohmann::json &j) { + return MultiGateReorderRoutingMethod( + j.at("depth").get(), j.at("size").get()); +} + +} // namespace tket diff --git a/tket/src/Mapping/RoutingMethodCircuit.cpp b/tket/src/Mapping/RoutingMethodCircuit.cpp new file mode 100644 index 0000000000..22bd7044e6 --- /dev/null +++ b/tket/src/Mapping/RoutingMethodCircuit.cpp @@ -0,0 +1,78 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "RoutingMethodCircuit.hpp" + +namespace tket { + +RoutingMethodCircuit::RoutingMethodCircuit( + const std::function( + const Circuit&, const ArchitecturePtr&)> + _route_subcircuit, + unsigned _max_size, unsigned _max_depth) + : route_subcircuit_(_route_subcircuit), + max_size_(_max_size), + max_depth_(_max_depth){}; + +std::pair RoutingMethodCircuit::routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const { + // Produce subcircuit and circuit + Subcircuit frontier_subcircuit = mapping_frontier->get_frontier_subcircuit( + this->max_depth_, this->max_size_); + Circuit frontier_circuit = + mapping_frontier->circuit_.subcircuit(frontier_subcircuit); + frontier_circuit.rename_units( + mapping_frontier->get_default_to_linear_boundary_unit_map()); + + // get routed subcircuit + std::tuple routed_subcircuit = + this->route_subcircuit_(frontier_circuit, architecture); + + if (!std::get<0>(routed_subcircuit)) { + return {false, {}}; + } + + // update unit id at boundary in case of relabelling + // The route_subcircuit_ method populates its initial map + // with unit ids from the circuit. e.g. Initial map from frontier == + // q[0]:unplaced[0], circuit.all_qubits() == unplaced[0]. Then the produced + // initial map == unplaced[0]:node[0] We have to update the initial map to + // q[0]:node[0] + mapping_frontier->update_linear_boundary_uids(std::get<2>(routed_subcircuit)); + for (const auto& pair : std::get<2>(routed_subcircuit)) { + mapping_frontier->update_bimaps( + mapping_frontier->get_qubit_from_circuit_uid(pair.first), pair.second); + } + + unit_map_t swap_permutation; + for (const auto& pair : std::get<2>(routed_subcircuit)) { + if (pair.first != pair.second && + architecture->node_exists(Node(pair.first))) { + swap_permutation.insert(pair); + } + } + // permute edges held by unitid at out boundary due to swaps + mapping_frontier->permute_subcircuit_q_out_hole( + std::get<3>(routed_subcircuit), frontier_subcircuit); + + // substitute old boundary with new cirucit + std::get<1>(routed_subcircuit).flatten_registers(); + mapping_frontier->circuit_.substitute( + std::get<1>(routed_subcircuit), frontier_subcircuit); + // return initial unit_map_t incase swap network required + return {true, swap_permutation}; +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/RoutingMethodJson.cpp b/tket/src/Mapping/RoutingMethodJson.cpp new file mode 100644 index 0000000000..103203ff1b --- /dev/null +++ b/tket/src/Mapping/RoutingMethodJson.cpp @@ -0,0 +1,63 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "Mapping/RoutingMethodJson.hpp" + +#include "Mapping/LexiLabelling.hpp" + +namespace tket { + +void to_json(nlohmann::json& j, const RoutingMethod& rm) { j = rm.serialize(); } + +void from_json(const nlohmann::json& /*j*/, RoutingMethod& rm) { + rm = RoutingMethod(); +} + +void to_json(nlohmann::json& j, const std::vector& rmp_v) { + for (const auto& r : rmp_v) { + j.push_back(*r); + } +} + +void from_json(const nlohmann::json& j, std::vector& rmp_v) { + for (const auto& c : j) { + std::string name = c.at("name").get(); + if (name == "LexiLabellingMethod") { + rmp_v.push_back(std::make_shared( + LexiLabellingMethod::deserialize(c))); + } else if (name == "LexiRouteRoutingMethod") { + rmp_v.push_back(std::make_shared( + LexiRouteRoutingMethod::deserialize(c))); + } else if (name == "RoutingMethod") { + rmp_v.push_back(std::make_shared()); + } else if (name == "AASRouteRoutingMethod") { + rmp_v.push_back(std::make_shared( + AASRouteRoutingMethod::deserialize(c))); + } else if (name == "AASLabellingMethod") { + rmp_v.push_back(std::make_shared( + AASLabellingMethod::deserialize(c))); + } else if (name == "MultiGateReorderRoutingMethod") { + rmp_v.push_back(std::make_shared( + MultiGateReorderRoutingMethod::deserialize(c))); + } else if (name == "BoxDecompositionRoutingMethod") { + rmp_v.push_back(std::make_shared( + BoxDecompositionRoutingMethod::deserialize(c))); + } else { + std::logic_error( + "Deserialization for given RoutingMethod not supported."); + } + } +} + +} // namespace tket diff --git a/tket/src/Routing/Verification.cpp b/tket/src/Mapping/Verification.cpp similarity index 100% rename from tket/src/Routing/Verification.cpp rename to tket/src/Mapping/Verification.cpp diff --git a/tket/src/Mapping/include/Mapping/AASLabelling.hpp b/tket/src/Mapping/include/Mapping/AASLabelling.hpp new file mode 100644 index 0000000000..d788612559 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/AASLabelling.hpp @@ -0,0 +1,48 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class AASLabellingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods redefined for dynamically assigning qubits to + * some Architecture. + */ + AASLabellingMethod(){}; + + /** + * will place all the qubits of the given circuit that are not placed at the + * moment. All nodes assigend to placed qubits will not be changed + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return bool if the method has been executed and logical to Physical + * mapping at boundary due to modification. 
+ * + */ + std::pair routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static AASLabellingMethod deserialize(const nlohmann::json& j); +}; +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/AASRoute.hpp b/tket/src/Mapping/include/Mapping/AASRoute.hpp new file mode 100644 index 0000000000..b8935d5fc9 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/AASRoute.hpp @@ -0,0 +1,76 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "ArchAwareSynth/SteinerForest.hpp" +#include "Mapping/LexicographicalComparison.hpp" +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class AASRouteError : public std::logic_error { + public: + explicit AASRouteError(const std::string& message) + : std::logic_error(message) {} +}; + +// Child class of RoutingMethod, with overloaded methods for routing +// MappingFrontier objects +class AASRouteRoutingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods for phase poly boxes using architecture aware + * synthesis + * @param _aaslookahead lookahead that should be used in the aas routing + * @param _cnotsynthtype type of cnot synthesis that should be used + */ + AASRouteRoutingMethod( + unsigned _aaslookahead, + aas::CNotSynthType _cnotsynthtype = aas::CNotSynthType::Rec); + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return bool if the method has been executed and logical to Physical + * mapping at boundary due to modification. + * + */ + std::pair routing_method( + std::shared_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + /** + * @return cnot synth type of this routing method + */ + aas::CNotSynthType get_cnotsynthtype() const; + + /** + * @return aaslookahead of this routing method + */ + unsigned get_aaslookahead() const; + + nlohmann::json serialize() const override; + + static AASRouteRoutingMethod deserialize(const nlohmann::json& j); + + private: + // type of cnot synthesis that should be used + aas::CNotSynthType cnotsynthtype_; + // lookahead that should be used in the aas routing + unsigned aaslookahead_; +}; +} // namespace tket diff --git a/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp new file mode 100644 index 0000000000..e194d00e81 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/BoxDecomposition.hpp @@ -0,0 +1,69 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class BoxDecomposition { + public: + /** + * Class Constructor + * @param _architecture Architecture object added operations must respect + * @param _mapping_frontier Contains Circuit object to be modified + */ + BoxDecomposition( + const ArchitecturePtr& _architecture, + MappingFrontier_ptr& _mapping_frontier); + + /** + * Decompose any boxes in the next slice after the frontier + * + * @return True if Box is decomposed + */ + bool solve(); + + private: + // Architecture all new physical operations must respect + ArchitecturePtr architecture_; + MappingFrontier_ptr mapping_frontier_; +}; + +class BoxDecompositionRoutingMethod : public RoutingMethod { + public: + /** + * Decompose any boxes on the frontier + */ + BoxDecompositionRoutingMethod(); + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. + * + */ + std::pair routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static BoxDecompositionRoutingMethod deserialize(const nlohmann::json& /*j*/); +}; + +} // namespace tket diff --git a/tket/src/Mapping/include/Mapping/LexiLabelling.hpp b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp new file mode 100644 index 0000000000..9d5268e19b --- /dev/null +++ b/tket/src/Mapping/include/Mapping/LexiLabelling.hpp @@ -0,0 +1,46 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class LexiLabellingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods redefined for dynamically assigning qubits to + * some Architecture. + */ + LexiLabellingMethod(){}; + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return True if transformation made, Logical to Physical mapping at + * boundary due to modification. 
+ * + */ + std::pair routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static LexiLabellingMethod deserialize(const nlohmann::json& j); +}; +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/LexiRoute.hpp b/tket/src/Mapping/include/Mapping/LexiRoute.hpp new file mode 100644 index 0000000000..d711d10376 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/LexiRoute.hpp @@ -0,0 +1,184 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/LexicographicalComparison.hpp" +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" +#include "Mapping/RoutingMethodJson.hpp" + +namespace tket { + +class LexiRouteError : public std::logic_error { + public: + explicit LexiRouteError(const std::string& message) + : std::logic_error(message) {} +}; + +/** + * A class for modifiying a Circuit held in a MappingFrontier object + * with either an Architecture permitted single SWAP gate or BRIDGE gate. + * Used in the LexiRouteRoutingMethod class which provides a subcircuit + * modification method for MappingManager. Used in solution presented in "On the + * qubit routing problem" -> arXiv:1902.08091 + */ +class LexiRoute { + public: + /** + * Class Constructor + * @param _architecture Architecture object added operations must respect + * @param _mapping_frontier Contains Circuit object to be modified + */ + LexiRoute( + const ArchitecturePtr& _architecture, + MappingFrontier_ptr& _mapping_frontier); + + /** + * When called, LexiRoute::solve will modify the Circuit held in + * MappingFrontier object passed at class construction. Either a SWAP gate + * will be inserted at the input boundary of the held Circuit or a CX gate + * will be transformed into a BRIDGE gate. The added SWAP or BRIDGE gate will + * be valid for the Architecture passed at class construction. + * The decision making is based on the heuristic outlined in arXiv:1902.08091. + * + * @param lookahead Number of slices to lookahead at when determining best + * SWAP or BRIDGE + * + * @return True if solve has modified circuit for mapping purposes + */ + bool solve(unsigned lookahead); + + /** + * When called an "unlabelled" Qubit in the Circuit may be relabelled to a + * Node in the Architecture, or an "unlabelled" Qubit may have its path merged + * with an ancilla qubit. The decision making is based on the heuristic + * outlined in arXiv:1902.08091. 
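+ * A minimal usage sketch (illustrative; the variable names are assumed): + * LexiRoute lr(architecture, mapping_frontier); + * lr.solve_labelling(); // assign any unlabelled qubits first + * lr.solve(lookahead); // then insert a SWAP or BRIDGE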
+ * + * @return True if solve_labelling has modified circuit for mapping purposes + */ + bool solve_labelling(); + + private: + /** Only considers two-qubit vertices if both qubits are labelled to + * Architecture */ + enum class AssignedOnly { Yes, No }; + /** Returns a bool confirming if vertices are valid for LexiRoute::solve */ + enum class CheckRoutingValidity { Yes, No }; + /** Returns a bool confirming if vertices are valid for + * LexiRoute::solve_labelling */ + enum class CheckLabellingValidity { Yes, No }; + + /** + * this->interacting_uids_ attribute is a map where key is one UnitID + * and value is the UnitID it needs to be adjacent to. + * This map is implicitly updated whenever a logical SWAP is inserted. + * set_interacting_uids determines this map for the first parallel set of + * interacting UnitID in the Circuit held in this->mapping_frontier_ + * @param assigned_only If Yes, only include interactions where both UnitID + * are in this->architecture_. + * @param route_check If Yes, return false if solve not possible + * @param label_check If Yes, return false if solve_labelling not possible + * + * @return bool depending on ENUM conditions + */ + bool set_interacting_uids( + AssignedOnly assigned_only, CheckRoutingValidity route_check, + CheckLabellingValidity label_check); + + /** + * If there is some "free" Node in Architecture at distance "distances" on + * the connectivity graph, assign (relabel) UnitID assignee to it. "free" + * => not in Circuit. If no unassigned node at distances from root, return + * false. + * @param assignee UnitID not in Architecture to relabel + * @param root Node in Architecture + * @param distances Distance at which to find free Node from root at + * @return True if assigned, else False + */ + bool assign_at_distance( + const UnitID& assignee, const Node& root, unsigned distances); + + /** + * If this->set_interacting_uids assigned_only bool is false then the + * this->interacting_uids attribute may have key and value UnitID not in + * this->architecture_. + * update_labelling assigns these non-architecture UnitID to some Architecture + * UnitID, updating the this->labelling_ attribute. + * @return True if anything relabelled, else false + */ + bool update_labelling(); + + /** + * Returns a set of pair of UnitID, each denoting a SWAP. + * Returned SWAP have at least one UnitID in interacting_uids_. + * This is such that enacting any of these SWAP will alter the distance + * between some interacting UnitID. + * @return Set of std::pair<Node, Node> suitable for addition to Circuit + */ + swap_set_t get_candidate_swaps(); + + /** + * Proposed swap will have two Node. + * Each of these Node may be in some interaction in the first layer of circuit + * held in mapping_frontier. If either of these Node are in an interaction, + * check whether said interaction is a CX interaction, and if the pair of Node + * in the interaction are at distance 2. If true, compare lexicographical + * distances between no swap and given swap assuming distance 2 interactions + * are complete. If no swap is better, update return object to reflect this. + * @param swap Pair of Node comprising SWAP for checking + * @param lookahead Number of steps of lookahead employed for comparison + * @return Pair of bool, where true implies BRIDGE to be added + */ + std::pair<bool, bool> check_bridge( + const std::pair<Node, Node>& swap, unsigned lookahead); + + /** + * Returns a pair of distances, where the distances are between p0_first & + * p0_second, and p1_first & p1_second. Pair object is ordered such that the + * greatest distance is first.
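+ * e.g. (illustrative) if d(p0_first, p0_second) = 1 and + * d(p1_first, p1_second) = 3 on the architecture graph, the returned pair + * is {3, 1}.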
+ * + * @param p0_first First Node in first interaction to find distance between + * @param p0_second Second Node in first interaction to find distance between + * @param p1_first First Node in second interaction to find distance between + * @param p1_second Second Node in second interaction to find distance between + * @return Pair of size_t, being distances on architecture graph + */ + const std::pair pair_distances( + const Node& p0_first, const Node& p0_second, const Node& p1_first, + const Node& p1_second) const; + + /** + * It is always expected that at least one Node in a SWAP will be in some + * interaction. This method checks that the given swap will strictly decrease + * the distance for this interaction, and removes it from the swaps set if + * not. + * + * @param swaps Potential swaps to remove from + */ + void remove_swaps_decreasing(swap_set_t& swaps); + + // Architecture all new physical operations must respect + ArchitecturePtr architecture_; + // Contains circuit for finding SWAP from and non-routed/routed boundary + MappingFrontier_ptr& mapping_frontier_; + // Map between UnitID and UnitID they interact with at boundary + unit_map_t interacting_uids_; + // Map between original circuit UnitID and new UnitID due to dynamic + // placement + unit_map_t labelling_; + // Set tracking which Architecture Node are present in Circuit + std::set assigned_nodes_; +}; + +} // namespace tket diff --git a/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp b/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp new file mode 100644 index 0000000000..be13fc51a9 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/LexiRouteRoutingMethod.hpp @@ -0,0 +1,60 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class LexiRouteRoutingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods redefined using LexiRoute. Only circuit depth, + * corresponding to lookahead, is a required parameter. + * + * @param _max_depth Number of layers of gates checked inr outed subcircuit. + */ + LexiRouteRoutingMethod(unsigned _max_depth = 100); + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * + * @return True if modification made, map between relabelled Qubit, always + * empty. 
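+ * Example use with MappingManager (a sketch; architecture and circuit are + * assumed to exist): + * std::vector<RoutingMethodPtr> methods = { + * std::make_shared<LexiLabellingMethod>(), + * std::make_shared<LexiRouteRoutingMethod>(50)}; + * MappingManager(architecture).route_circuit(circuit, methods);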
+ * + */ + std::pair<bool, unit_map_t> routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + /** + * @return Max depth used in lookahead + */ + unsigned get_max_depth() const; + + nlohmann::json serialize() const override; + + static LexiRouteRoutingMethod deserialize(const nlohmann::json& j); + + private: + unsigned max_depth_; +}; + +JSON_DECL(LexiRouteRoutingMethod); + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp b/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp new file mode 100644 index 0000000000..f597ad4df4 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/LexicographicalComparison.hpp @@ -0,0 +1,103 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Architecture/Architecture.hpp" +#include "Utils/BiMapHeaders.hpp" +#include "Utils/UnitID.hpp" + +namespace tket { + +typedef std::map<Node, Node> interacting_nodes_t; +typedef std::pair<Node, Node> swap_t; +typedef std::set<swap_t> swap_set_t; +typedef std::vector<size_t> lexicographical_distances_t; + +class LexicographicalComparisonError : public std::logic_error { + public: + explicit LexicographicalComparisonError(const std::string& message) + : std::logic_error(message) {} +}; + +/** + * A class for running lexicographical comparisons of SWAP gates for some + * architecture and set of interacting qubits. + * Used in the 'LexiRoute' method for routing subcircuits as part of the + * MappingManager framework. + * Used in solution presented in "On the qubit routing problem" -> + * arXiv:1902.08091 + */ +class LexicographicalComparison { + public: + /** + * Class constructor + * @param _architecture Architecture object for calculating distances from + * @param _interacting_nodes Pairs of physical Node with interacting logical + * Qubit + */ + LexicographicalComparison( + const ArchitecturePtr& _architecture, + const interacting_nodes_t& _interacting_nodes); + + /** + * Modifies some distances object by reference. + * Updates the distance between pair Node in interaction by increment. + * Increment and Interaction determined by some SWAP. + * + * @param distances Distances object updated. + * @param interaction Node pair increment distance indexing found from + * @param increment Amount to modify distance index by + */ + void increment_distances( + lexicographical_distances_t& distances, + const std::pair<Node, Node>& interaction, int increment) const; + + /** + * Returns the held lexicographically ordered vector of distances between + * nodes on the architecture the class object is constructed from, with + * changes from increment_distances.
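+ * (Roughly: entries corresponding to larger node separations are compared + * first, so a SWAP that reduces the separation of the furthest-apart + * interacting pair wins the lexicographical comparison.)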
+ * + * @return Lexicographically ordered distance vector + */ + lexicographical_distances_t get_lexicographical_distances() const; + + /** + * Takes a copy of the distance vector held in the object and modifies it to + * reflect how the distance between pairs of interacting nodes would change + * if the logical qubits assigned to the physical nodes in "swap" were + * swapped. + * + * @param swap Pair of physical Node whose logical Qubit are swapped to + * derive the copied distances + */ + lexicographical_distances_t get_updated_distances(const swap_t& swap) const; + + /** + * For each swap in candidate_swaps, removes swap from the set if the + * distance vector produced by modifying this->lexicographical_distances by + * said swap is lexicographically greater than that produced by some other + * candidate swap. In this way, only the swaps producing the (jointly) + * lexicographically smallest distance vector for the given interacting + * nodes remain after the method is called. + * + * @param candidate_swaps Potential pairs of nodes for comparing and removing + */ + void remove_swaps_lexicographical(swap_set_t& candidate_swaps) const; + + private: + ArchitecturePtr architecture_; + lexicographical_distances_t lexicographical_distances; + interacting_nodes_t interacting_nodes_; +}; + +} // namespace tket diff --git a/tket/src/Mapping/include/Mapping/MappingFrontier.hpp b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp new file mode 100644 index 0000000000..0b4edc98e0 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/MappingFrontier.hpp @@ -0,0 +1,210 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Architecture/Architecture.hpp" +#include "Circuit/Circuit.hpp" +#include "Utils/BiMapHeaders.hpp" +#include "Utils/UnitID.hpp" + +namespace tket { + +typedef sequenced_bimap_t<UnitID, VertPort> unit_vertport_frontier_t; + +// list of error types to throw out +class MappingFrontierError : public std::logic_error { + public: + explicit MappingFrontierError(const std::string& message) + : std::logic_error(message) {} +}; + +/** + * linear_boundary stored as VertPort so that the correct edge can be recovered + * after subcircuit substitution; the method uses Vertex and port_t with + * Circuit::get_nth_out_edge to generate a unit_frontier_t object + */ +std::shared_ptr<unit_frontier_t> frontier_convert_vertport_to_edge( + const Circuit& circuit, + const std::shared_ptr<unit_vertport_frontier_t>& u_frontier); + +/** + * convert_u_frontier_to_edges + * Subcircuit requires EdgeVec, not unit_frontier_t as boundary information + * Helper Functions to convert types + */ +EdgeVec convert_u_frontier_to_edges(const unit_frontier_t& u_frontier); +struct MappingFrontier { + /** + * VertPort used instead of Edge, as an Edge changes during substitution + * while the Vertex and Port retain the key information + */ + std::shared_ptr<unit_vertport_frontier_t> linear_boundary; + + std::shared_ptr<b_frontier_t> boolean_boundary; + + /** + * Circuit held by reference and directly modified with SWAP (or other + * relevant) gates.
+ */ + Circuit& circuit_; + + std::set ancilla_nodes_; + + std::shared_ptr bimaps_; + + MappingFrontier(Circuit& _circuit); + + MappingFrontier(Circuit& _circuit, std::shared_ptr _bimaps); + + // copy constructor + MappingFrontier(const MappingFrontier& mapping_frontier); + + /** + * Given some Circuit Cut (or routed/unrouted boundary), advances the cut to + * the next cut of just two-qubit vertices, not including the current + * boundary. + * @param max_advance maximum number of cuts checked before terminating + */ + void advance_next_2qb_slice(unsigned max_advance); + + /** + * mapping_frontier data members updated to reflect + * the routed/non-routed boundary of mapping_frontier->circ + * architecture.valid_gate confirms whether circuit vertices are physically + * valid + * + * @param architecture Architecture governing physically allowed operations + */ + void advance_frontier_boundary(const ArchitecturePtr& architecture); + + /** + * Subcircuit produced from gates after held boundary. + * @param _max_subcircuit_depth + * @param _max_subcircuit_size + * + */ + Subcircuit get_frontier_subcircuit( + unsigned _max_subcircuit_depth, unsigned _max_subcircuit_size) const; + + /** + * update_linear_boundary_uids + * route_circuit has no constraint that passed circuits must have qubits + * relabelled to architecture nodes route_subcircuit is allowed to either + * permute labelled physical qubits, or label logical qubits if logical qubits + * are labelled physical, update_linear_boundary updates UnitID in + * this->linear_boundary to reflect this change Also updates this->circuit_ + * to reflect this relabelling + * + * @param relabel_map map between current UnitID's in linear_boundary and new + * UnitID's. + */ + void update_linear_boundary_uids(const unit_map_t& relabel_map); + + /** + * permute_subcircuit_q_out_hole + * + * Given initial permutation of UnitIDs, finds final permutation via SWAPs in + * circuit and updates mapping_frontier subcircuit q_out_hole to reflect this + * + * @param final_permutation map between initial and final physical qubits for + * each logical qubit, used to permute subcircuit.q_out_hole + * @param subcircuit Subcircuit for rearranging boundary + */ + void permute_subcircuit_q_out_hole( + const unit_map_t& final_permutation, Subcircuit& subcircuit); + + /** + * get_default_to_linear_boundary_unit_map + * subcircuit circuits created with default q register + * method returns map between default q register and physical qubit + * permutation at frontier used for circuit.rename_units + */ + unit_map_t get_default_to_linear_boundary_unit_map() const; + + /** + * add_swap + * Inserts an OpType::SWAP gate into the uid_0 and uid_1 edges held in + * linear_boundary. This directly modifies circuit_. + * Updates linear_boundary to reflect new edges. + * + * @param uid_0 First Node in SWAP + * @param uid_1 Second Node in SWAP + */ + void add_swap(const UnitID& uid_0, const UnitID& uid_1); + + /** + * add_bridge + * Inserts an OpType::BRIDGE gate into edges relevant to passed UnitID. 
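+ * e.g. (illustrative) a CX between control and target at distance two on + * the architecture, with central adjacent to both, is replaced by a BRIDGE + * across the three wires; no qubit relabelling takes place.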
+ * + * @param control First Node in BRIDGE + * @param central Second Node in BRIDGE + * @param target Third Node in BRIDGE + */ + void add_bridge( + const UnitID& control, const UnitID& central, const UnitID& target); + + /** + * add_ancilla + * Adds an Ancillary UnitID to Circuit and tracked information + * + * @param ancilla UnitID of added ancilla + */ + void add_ancilla(const UnitID& ancilla); + + /** + * merge_ancilla + * Rewires this->circuit_.dag such that in wire to ancilla Output vertex + * is now mapped to out wire of merge Input vertex. + * + * @param merge UnitID to which ancilla path is prepended + * @param ancilla UnitID of ancilla opeartions + */ + void merge_ancilla(const UnitID& merge, const UnitID& ancilla); + + /** + * Assigns the linear_boundary_ attribute to that passed to method. + * + * @param new_boundary Object to reassign with. + */ + void set_linear_boundary(const unit_vertport_frontier_t& new_boundary); + + /** + * Returns true if the given operation acting on the given nodes + * can be executed on the Architecture connectivity graph. + * @param architecture given architecture to check the operation on + * @param op operation to check + * @param uids vector of nodes which is included in the operation + */ + bool valid_boundary_operation( + const ArchitecturePtr& architecture, const Op_ptr& op, + const std::vector& uids) const; + + /** + * Update a qubit mapping in both the initial map and the final map + * @param qubit the qubit mapping to be updated + * @param node the new node to be mapped + */ + void update_bimaps(UnitID qubit, UnitID node); + + /** + * Get the qubit in the initial map given it's mapped uid. + * @param uid UnitID in the circuit + */ + UnitID get_qubit_from_circuit_uid(const UnitID& uid); +}; + +typedef std::shared_ptr MappingFrontier_ptr; + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MappingManager.hpp b/tket/src/Mapping/include/Mapping/MappingManager.hpp new file mode 100644 index 0000000000..1f19acaec6 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/MappingManager.hpp @@ -0,0 +1,81 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "Architecture/Architecture.hpp" +#include "Circuit/Circuit.hpp" +#include "Mapping/RoutingMethod.hpp" +#include "Utils/UnitID.hpp" + +namespace tket { + +// list of error types to throw out +class MappingManagerError : public std::logic_error { + public: + explicit MappingManagerError(const std::string& message) + : std::logic_error(message) {} +}; + +class MappingManager { + public: + /* Mapping Manager Constructor */ + // MappingManager object defined by Architecture initialised with + MappingManager(const ArchitecturePtr& _architecture); + + /** + * route_circuit + * Referenced Circuit modified such that all multi-qubit gates are permitted + * by this->architecture_ RoutingIncompability thrown if Circuit has more + * logical qubits than Architecture has physical qubits RoutingIncompability + * thrown if Circuit has a gate of OpType not in Architecture's permitted + * OpTypes + * + * @param circuit Circuit to be routed + * @param routing_methods Ranked RoutingMethod objects to use for routing + * segments. + * @param label_isolated_qubits will not label qubits without gates or only + * single qubit gates on them if this is set false + * @return True if circuit is modified + */ + bool route_circuit( + Circuit& circuit, const std::vector& routing_methods, + bool label_isolated_qubits = true) const; + + /** + * route_circuit_maps + * Referenced Circuit modified such that all multi-qubit gates are permitted + * by this->architecture_ RoutingIncompability thrown if Circuit has more + * logical qubits than Architecture has physical qubits RoutingIncompability + * thrown if Circuit has a gate of OpType not in Architecture's permitted + * OpTypes + * + * @param circuit Circuit to be routed + * @param routing_methods Ranked RoutingMethod objects to use for routing + * segments. + * @param maps For tracking placed and permuted qubits during Compilation + * @param label_isolated_qubits will not label qubits without gates or only + * single qubit gates on them if this is set false + * + * @return True if circuit is modified + */ + bool route_circuit_with_maps( + Circuit& circuit, const std::vector& routing_methods, + std::shared_ptr maps, + bool label_isolated_qubits = true) const; + + private: + ArchitecturePtr architecture_; +}; +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp new file mode 100644 index 0000000000..024bef3f30 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/MultiGateReorder.hpp @@ -0,0 +1,95 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class MultiGateReorder { + public: + /** + * Class Constructor + * @param _architecture Architecture object added operations must respect + * @param _mapping_frontier Contains Circuit object to be modified + */ + MultiGateReorder( + const ArchitecturePtr& _architecture, + MappingFrontier_ptr& _mapping_frontier); + + /** + * Try to commute any multi-qubit gates to the quantum frontier + * @param max_depth Maximum number of layers of gates checked for simultaneous + * commutation. + * @param max_size Maximum number of gates checked for simultaneous + * commutation. + * + * @return true if modification made + */ + bool solve(unsigned max_depth, unsigned max_size); + + private: + // Architecture all new physical operations must respect + ArchitecturePtr architecture_; + MappingFrontier_ptr mapping_frontier_; + EdgeVec u_frontier_edges_; +}; + +class MultiGateReorderRoutingMethod : public RoutingMethod { + public: + /** + * Checking and Routing methods redefined using MultiGateReorder. + * @param _max_depth Maximum number of layers of gates checked for + * simultaneous commutation. + * @param _max_size Maximum number of gates checked for simultaneous + * commutation. + */ + MultiGateReorderRoutingMethod( + unsigned _max_depth = 10, unsigned _max_size = 10); + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Whether circuit is modified and Logical to Physical mapping at + * boundary due to modification (always empty) + * + */ + std::pair routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const override; + + nlohmann::json serialize() const override; + + static MultiGateReorderRoutingMethod deserialize(const nlohmann::json& j); + + /** + * @return Maximum number of layers of gates checked for simultaneous + * commutation. + */ + unsigned get_max_depth() const; + + /** + * @return Maximum number of gates checked for simultaneous commutation. + */ + unsigned get_max_size() const; + + private: + unsigned max_depth_; + unsigned max_size_; +}; + +} // namespace tket diff --git a/tket/src/Mapping/include/Mapping/RoutingMethod.hpp b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp new file mode 100644 index 0000000000..ac29cbaf37 --- /dev/null +++ b/tket/src/Mapping/include/Mapping/RoutingMethod.hpp @@ -0,0 +1,60 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/MappingFrontier.hpp" +#include "Utils/Json.hpp" + +namespace tket { + +class RoutingMethod { + public: + RoutingMethod(){}; + virtual ~RoutingMethod() {} + + /** + * routing_method modifies circuit held in mapping_frontier with gates for the + * purpose of moving circuit closer to one physically permitted by given + * architecture. 
Returns a pair with a bool returning whether any modification + * was made and a new initial mapping of qubits in case permutation via swap + * network is then required, or new ancilla qubits are added. This is + * completed by converting boundary subcircuit in mapping frontier to a + * Circuit object which is then passed to route_subcircuit_ as defined in the + * constructor. + * + * Overloaded parameter mapping_frontier contains boundary of routed/unrouted + * circuit for modifying. + * Overloaded parameter architecture provides physical constraints + * + * @return Whether circuit is modified and Logical to Physical mapping at + * boundary due to modification. + * + */ + virtual std::pair routing_method( + MappingFrontier_ptr& /*mapping_frontier*/, + const ArchitecturePtr& /*architecture*/) const { + return {false, {}}; + } + + virtual nlohmann::json serialize() const { + nlohmann::json j; + j["name"] = "RoutingMethod"; + return j; + } +}; + +typedef std::shared_ptr RoutingMethodPtr; + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp new file mode 100644 index 0000000000..ea5de4455e --- /dev/null +++ b/tket/src/Mapping/include/Mapping/RoutingMethodCircuit.hpp @@ -0,0 +1,58 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/RoutingMethod.hpp" + +namespace tket { + +class RoutingMethodCircuit : public RoutingMethod { + public: + virtual ~RoutingMethodCircuit() {} + /** + * RoutingMethodCircuit objects hold methods for partially routing subcircuits + * in the incremental routing of full circuits. + * + * @param _route_subcircuit Function ptr for partial routing method + * @param _max_size Max number of gates in partial routing circuit + * @param _max_depth Max depth of partial routing circuit + */ + RoutingMethodCircuit( + const std::function( + const Circuit&, const ArchitecturePtr&)> + _route_subcircuit, + unsigned _max_size, unsigned _max_depth); + + /** + * @param mapping_frontier Contains boundary of routed/unrouted circuit for + * modifying + * @param architecture Architecture providing physical constraints + * @return Logical to Physical mapping at boundary due to modification. 
+ * + */ + std::pair routing_method( + MappingFrontier_ptr& mapping_frontier, + const ArchitecturePtr& architecture) const; + + private: + const std::function( + const Circuit&, const ArchitecturePtr&)> + route_subcircuit_; + unsigned max_size_, max_depth_; +}; + +JSON_DECL(RoutingMethod); + +} // namespace tket diff --git a/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp new file mode 100644 index 0000000000..85853b328f --- /dev/null +++ b/tket/src/Mapping/include/Mapping/RoutingMethodJson.hpp @@ -0,0 +1,40 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Mapping/AASLabelling.hpp" +#include "Mapping/AASRoute.hpp" +#include "Mapping/BoxDecomposition.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRouteRoutingMethod.hpp" +#include "Mapping/MultiGateReorder.hpp" +#include "Mapping/RoutingMethod.hpp" +#include "Utils/Json.hpp" + +namespace tket { + +void to_json(nlohmann::json& j, const RoutingMethod& rm); + +void from_json(const nlohmann::json& /*j*/, RoutingMethod& rm); + +JSON_DECL(RoutingMethod); + +void to_json(nlohmann::json& j, const std::vector& rmp_v); + +void from_json(const nlohmann::json& j, std::vector& rmp_v); + +JSON_DECL(std::vector); + +} // namespace tket diff --git a/tket/src/Routing/include/Routing/Verification.hpp b/tket/src/Mapping/include/Mapping/Verification.hpp similarity index 100% rename from tket/src/Routing/include/Routing/Verification.hpp rename to tket/src/Mapping/include/Mapping/Verification.hpp diff --git a/tket/src/PauliGraph/PauliGraph.cpp b/tket/src/PauliGraph/PauliGraph.cpp index e0b1b83c85..86305fd2ed 100644 --- a/tket/src/PauliGraph/PauliGraph.cpp +++ b/tket/src/PauliGraph/PauliGraph.cpp @@ -313,7 +313,8 @@ void PauliGraph::apply_pauli_gadget_at_end( } PauliGraph::TopSortIterator::TopSortIterator() - : current_vert_(boost::graph_traits::null_vertex()) {} + : pg_(nullptr), + current_vert_(boost::graph_traits::null_vertex()) {} PauliGraph::TopSortIterator::TopSortIterator(const PauliGraph &pg) { if (pg.start_line_.empty()) { diff --git a/tket/src/Routing/CMakeLists.txt b/tket/src/Placement/CMakeLists.txt similarity index 89% rename from tket/src/Routing/CMakeLists.txt rename to tket/src/Placement/CMakeLists.txt index 00e77f61d9..29bd59dd24 100644 --- a/tket/src/Routing/CMakeLists.txt +++ b/tket/src/Placement/CMakeLists.txt @@ -14,20 +14,16 @@ project(tket-${COMP}) -if (NOT ${COMP} STREQUAL "Routing") +if (NOT ${COMP} STREQUAL "Placement") message(FATAL_ERROR "Unexpected component name.") endif() add_library(tket-${COMP} Qubit_Placement.cpp - Swap_Analysis.cpp - Board_Analysis.cpp - Routing.cpp - Slice_Manipulation.cpp subgraph_mapping.cpp Placement.cpp PlacementGraphClasses.cpp - Verification.cpp) + NeighbourPlacements.cpp) list(APPEND DEPS_${COMP} Architecture @@ -37,6 +33,7 @@ list(APPEND DEPS_${COMP} Graphs Ops OpType + TokenSwapping Utils) foreach(DEP ${DEPS_${COMP}}) diff --git 
a/tket/src/Placement/NeighbourPlacements.cpp b/tket/src/Placement/NeighbourPlacements.cpp new file mode 100644 index 0000000000..517d6db5bf --- /dev/null +++ b/tket/src/Placement/NeighbourPlacements.cpp @@ -0,0 +1,145 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "NeighbourPlacements.hpp" + +#include +#include + +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "Utils/TketLog.hpp" + +namespace tket { + +NeighbourPlacements::NeighbourPlacements( + const Architecture& arc, const qubit_mapping_t& init_map) + : arc_(arc), init_map_(init_map), u_to_node_(), rng_() { + auto nodes = arc_.get_all_nodes_vec(); + for (unsigned i = 0; i < nodes.size(); ++i) { + u_to_node_.left.insert({i, nodes[i]}); + } +} + +NeighbourPlacements::ResultVec NeighbourPlacements::get( + unsigned dist, unsigned n, bool optimise, unsigned seed, + unsigned max_tries) { + rng_.set_seed(seed); + + // define a comparison function for placements + std::vector keys; + for (auto [k, v] : init_map_) { + keys.push_back(k); + } + auto map_compare = [&keys]( + const qubit_mapping_t& a, const qubit_mapping_t& b) { + for (auto k : keys) { + if (a.at(k) < b.at(k)) { + return true; + } else if (a.at(k) > b.at(k)) { + return false; + } + } + return false; + }; + // set of all generated placement maps + std::set placements(map_compare); + + ResultVec resvec; + for (unsigned i = 0; i < n; ++i) { + unsigned n_unsuccessful = 0; + while (n_unsuccessful < max_tries) { + Result res = gen_result(dist, optimise, max_tries); + if (!placements.contains(res.map)) { + resvec.push_back(res); + placements.insert(res.map); + break; + } + ++n_unsuccessful; + } + if (n_unsuccessful == max_tries) { + tket_log()->warn( + "Could not generate " + std::to_string(n) + " distinct placements"); + } + } + return resvec; +} + +NeighbourPlacements::Result NeighbourPlacements::gen_result( + unsigned dist, bool optimise, unsigned max_tries) { + SwapList swaps; + tsa_internal::SwapListOptimiser optimiser; + + // it might be impossible to find `dist` non-trivial swaps + unsigned n_unsuccessful = 0; + + while (swaps.size() < dist && n_unsuccessful < max_tries) { + Swap new_swap = gen_swap(); + + if (optimise) { + SwapList swaps_candidate = swaps; + swaps_candidate.push_back(new_swap); + optimiser.full_optimise(swaps_candidate); + if (swaps_candidate.size() > swaps.size()) { + swaps = std::move(swaps_candidate); + n_unsuccessful = 0; + } else { + ++n_unsuccessful; + } + } else { + swaps.push_back(new_swap); + } + } + + if (n_unsuccessful == max_tries) { + tket_log()->warn( + "Unable to generate " + std::to_string(dist) + + " swaps for given architecture"); + } + + return convert_to_res(swaps.to_vector()); +} + +Swap NeighbourPlacements::gen_swap() { + auto edges = arc_.get_all_edges_vec(); + unsigned m = edges.size(); + auto [n1, n2] = edges[rng_.get_size_t(m - 1)]; + Swap new_swap{u_to_node_.right.at(n1), u_to_node_.right.at(n2)}; + return new_swap; +} + +NeighbourPlacements::Result 
NeighbourPlacements::convert_to_res( + const SwapVec& swaps) { + NodeSwapVec node_swaps; + for (auto [u1, u2] : swaps) { + node_swaps.push_back({u_to_node_.left.at(u1), u_to_node_.left.at(u2)}); + } + + qubit_bimap_t qubit_to_node; + qubit_to_node.left.insert(init_map_.begin(), init_map_.end()); + for (auto [n1, n2] : node_swaps) { + const Qubit q1 = qubit_to_node.right.at(n1); + const Qubit q2 = qubit_to_node.right.at(n2); + qubit_to_node.left.erase(q1); + qubit_to_node.left.erase(q2); + qubit_to_node.left.insert({q1, n2}); + qubit_to_node.left.insert({q2, n1}); + } + qubit_mapping_t map; + for (auto [k, v] : qubit_to_node.left) { + map.insert({k, v}); + } + return {map, node_swaps}; +} + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Routing/Placement.cpp b/tket/src/Placement/Placement.cpp similarity index 82% rename from tket/src/Routing/Placement.cpp rename to tket/src/Placement/Placement.cpp index 12c3aa018b..e5d2184593 100644 --- a/tket/src/Routing/Placement.cpp +++ b/tket/src/Placement/Placement.cpp @@ -127,15 +127,20 @@ void fill_partial_mapping( } // Default placement methods -bool Placement::place(Circuit &circ_) const { +bool Placement::place( + Circuit &circ_, std::shared_ptr maps) const { qubit_mapping_t map_ = get_placement_map(circ_); - return place_with_map(circ_, map_); + return place_with_map(circ_, map_, maps); } -bool Placement::place_with_map(Circuit &circ_, qubit_mapping_t &map_) { +bool Placement::place_with_map( + Circuit &circ_, qubit_mapping_t &map_, + std::shared_ptr maps) { qubit_vector_t circ_qbs = circ_.all_qubits(); fill_partial_mapping(circ_qbs, map_); - return circ_.rename_units(map_); + bool changed = circ_.rename_units(map_); + changed |= update_maps(maps, map_, map_); + return changed; } qubit_mapping_t Placement::get_placement_map(const Circuit &circ_) const { @@ -149,6 +154,45 @@ std::vector Placement::get_all_placement_maps( return {get_placement_map(circ_)}; } +qubit_mapping_t NaivePlacement::get_placement_map(const Circuit &circ_) const { + return get_all_placement_maps(circ_).at(0); +} + +std::vector NaivePlacement::get_all_placement_maps( + const Circuit &circ_) const { + qubit_mapping_t placement; + qubit_vector_t to_place; + std::vector placed; + + // Find which/if any qubits need placing + for (const Qubit &q : circ_.all_qubits()) { + Node n(q); + if (!this->arc_.node_exists(n)) { + to_place.push_back(n); + } else { + placed.push_back(n); + // if already placed, make sure qubit retains placement + placement.insert({n, n}); + } + } + // avoid doing std::set_difference unless qubits need to be placed + unsigned n_placed = to_place.size(); + if (n_placed > 0) { + std::vector difference, + architecture_nodes = this->arc_.get_all_nodes_vec(); + std::set_difference( + architecture_nodes.begin(), architecture_nodes.end(), placed.begin(), + placed.end(), std::inserter(difference, difference.begin())); + // should always be enough remaining qubits to assign unplaced qubits to + TKET_ASSERT(difference.size() >= n_placed); + for (unsigned i = 0; i < n_placed; i++) { + // naively assign each qubit to some free node + placement.insert({to_place[i], difference[i]}); + } + } + return {placement}; +} + qubit_mapping_t LinePlacement::get_placement_map(const Circuit &circ_) const { return get_all_placement_maps(circ_).at(0); } diff --git a/tket/src/Routing/PlacementGraphClasses.cpp b/tket/src/Placement/PlacementGraphClasses.cpp similarity index 100% rename from tket/src/Routing/PlacementGraphClasses.cpp rename to 
tket/src/Placement/PlacementGraphClasses.cpp diff --git a/tket/src/Routing/Qubit_Placement.cpp b/tket/src/Placement/Qubit_Placement.cpp similarity index 88% rename from tket/src/Routing/Qubit_Placement.cpp rename to tket/src/Placement/Qubit_Placement.cpp index a7695d435a..d3c324edc2 100644 --- a/tket/src/Routing/Qubit_Placement.cpp +++ b/tket/src/Placement/Qubit_Placement.cpp @@ -21,7 +21,6 @@ #include "Architecture/Architecture.hpp" #include "Graphs/Utils.hpp" #include "Placement.hpp" -#include "Routing.hpp" namespace tket { @@ -38,13 +37,53 @@ std::set interacting_qbs(const Circuit& circ) { return qbs; } +PlacementFrontier::PlacementFrontier(const Circuit& _circ) : circ(_circ) { + VertexVec input_slice; + quantum_in_edges = std::make_shared(); + boolean_in_edges = std::make_shared(); + + for (const Qubit& qb : circ.all_qubits()) { + Vertex input = circ.get_in(qb); + input_slice.push_back(input); + Edge candidate = circ.get_nth_out_edge(input, 0); + quantum_in_edges->insert({qb, circ.skip_irrelevant_edges(candidate)}); + } + for (const Bit& bit : circ.all_bits()) { + Vertex input = circ.get_in(bit); + EdgeVec candidates = circ.get_nth_b_out_bundle(input, 0); + boolean_in_edges->insert({bit, candidates}); + } + + CutFrontier next_cut = circ.next_cut(quantum_in_edges, boolean_in_edges); + slice = next_cut.slice; + quantum_out_edges = next_cut.u_frontier; +} + +void PlacementFrontier::next_slicefrontier() { + quantum_in_edges = std::make_shared(); + boolean_in_edges = std::make_shared(); + for (const std::pair& pair : quantum_out_edges->get()) { + Edge new_e = circ.skip_irrelevant_edges(pair.second); + quantum_in_edges->insert({pair.first, new_e}); + Vertex targ = circ.target(new_e); + EdgeVec targ_classical_ins = + circ.get_in_edges_of_type(targ, EdgeType::Boolean); + boolean_in_edges->insert( + {Bit("frontier_bit", pair.first.index()), targ_classical_ins}); + } + + CutFrontier next_cut = circ.next_cut(quantum_in_edges, boolean_in_edges); + slice = next_cut.slice; + quantum_out_edges = next_cut.u_frontier; +} + QubitGraph monomorph_interaction_graph( const Circuit& circ, const unsigned max_edges, unsigned depth_limit) { std::set qubits_considered = interacting_qbs(circ); QubitGraph q_graph(circ.all_qubits()); - RoutingFrontier current_sf(circ); + PlacementFrontier current_sf(circ); unsigned count_edges = 0; for (unsigned slice = 0; slice < depth_limit && count_edges < max_edges && @@ -77,7 +116,7 @@ QubitGraph generate_interaction_graph( const Circuit& circ, unsigned depth_limit) { std::set qubits_considered = interacting_qbs(circ); QubitGraph q_graph(circ.all_qubits()); - RoutingFrontier current_sf(circ); + PlacementFrontier current_sf(circ); for (unsigned slice = 0; slice < depth_limit && !current_sf.slice->empty() && qubits_considered.size() > 1; diff --git a/tket/src/Placement/include/Placement/NeighbourPlacements.hpp b/tket/src/Placement/include/Placement/NeighbourPlacements.hpp new file mode 100644 index 0000000000..236a64b79b --- /dev/null +++ b/tket/src/Placement/include/Placement/NeighbourPlacements.hpp @@ -0,0 +1,99 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "Placement.hpp" +#include "TokenSwapping/SwapFunctions.hpp" +#include "Utils/BiMapHeaders.hpp" +#include "Utils/RNG.hpp" + +namespace tket { + +/** + * @brief Given a placement map, generates `n` nearby placement maps. + * + * Based on an architecture and a placement map, generates random + * placements that can be reached with `dist` swaps. + * + * Optionally uses token swapping optimisations to try to ensure + * that the generated placements cannot be obtained in fewer than `dist` + * swaps, but this cannot be guaranteed. + */ +class NeighbourPlacements { + public: + using SwapVec = std::vector<Swap>; + using NodeSwap = std::pair<Node, Node>; + using NodeSwapVec = std::vector<NodeSwap>; + struct Result { + qubit_mapping_t map; + NodeSwapVec swaps; + }; + using ResultVec = std::vector<Result>; + + /** + * @brief Construct a new NeighbourPlacements object. + * + * @param arc The architecture defining the allowed swaps. + * @param init_map The initial Qubit => Node map. + */ + NeighbourPlacements(const Architecture& arc, const qubit_mapping_t& init_map); + + /** + * @brief Generate `n` distinct placement maps using `dist` swaps for each map. + * + * The sequences of swaps are generated randomly. Note that it cannot be + * guaranteed that the generated placement cannot be obtained in fewer than + * `dist` swaps. When optimise=true (default), we attempt to simplify + * chains of swaps to make it more likely that `dist` swaps are indeed + * necessary for the generated placement maps. + * + * If optimise=true, it is also possible that placements `dist` swaps away + * do not exist. `max_tries` controls the number of attempts to generate + * placements. + * + * If it is impossible (or very hard) to generate `n` distinct placement maps + * of distance `dist` swaps away, then this method will raise a warning + * and return fewer results and/or results with fewer than `dist` swaps. + * + * @param dist The number of swaps allowed on the architecture. + * @param n The number of placement maps to generate (default n=1). + * @param optimise Simplify the generated swap sequences (default true). + * @param seed Seed for random number generator (default seed=5489). + * @param max_tries Number of tries before aborting placement map generation + * (default max_tries=10).
+ * @return ResultVec A vector of the generated maps and swaps + */ + ResultVec get( + unsigned dist, unsigned n = 1, bool optimise = true, unsigned seed = 5489, + unsigned max_tries = 10); + + private: + // generate a single Result + Result gen_result( + unsigned dist, bool optimise = true, unsigned max_tries = 10); + // generate a single swap + Swap gen_swap(); + // apply swap list to init_map and return new placement map + Result convert_to_res(const SwapVec& swaps); + + Architecture arc_; + qubit_mapping_t init_map_; + boost::bimap<unsigned, Node> u_to_node_; + RNG rng_; +}; + +} // namespace tket \ No newline at end of file diff --git a/tket/src/Routing/include/Routing/Placement.hpp b/tket/src/Placement/include/Placement/Placement.hpp similarity index 83% rename from tket/src/Routing/include/Routing/Placement.hpp rename to tket/src/Placement/include/Placement/Placement.hpp index ec431a2406..ff2f2dd60c 100644 --- a/tket/src/Routing/include/Routing/Placement.hpp +++ b/tket/src/Placement/include/Placement/Placement.hpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -104,6 +105,27 @@ struct PlacementConfig { JSON_DECL(PlacementConfig) +// stores and tracks the point of the circuit up to which placement has been solved +struct PlacementFrontier { + // set of 2qb vertices which need to be solved for + std::shared_ptr slice; + // Quantum Edges coming in to vertices in slice, indexed by qubit + std::shared_ptr quantum_in_edges; + // Quantum Edges leaving vertices in slice, indexed by qubit + std::shared_ptr quantum_out_edges; + // Boolean edges coming in to vertices in slice. Guarantees that all edges + // into every vertex in slice are represented in next_cut + std::shared_ptr boolean_in_edges; + + // reference to the circuit that it acts on + const Circuit& circ; + + // initialise at front of circuit + explicit PlacementFrontier(const Circuit& _circ); + // move to next slice + void next_slicefrontier(); +}; + // Class for storing interaction graph. // Interacting qubits have an edge between them. class QubitGraph : public graphs::DirectedGraph { @@ -218,16 +240,19 @@ class Placement { /** * Modify qubits in place. * - * @return true iff circuit is modified + * @return true iff circuit or maps are modified */ - bool place(Circuit& circ_) const; + bool place( + Circuit& circ_, std::shared_ptr<unit_bimaps_t> maps = nullptr) const; /** * Relabel circuit qubits to device nodes according to given map. * - * @return true iff circuit was modified + * @return true iff circuit or maps were modified */ - static bool place_with_map(Circuit& circ_, qubit_mapping_t& map_); + static bool place_with_map( + Circuit& circ, qubit_mapping_t& map_, + std::shared_ptr<unit_bimaps_t> maps = nullptr); virtual qubit_mapping_t get_placement_map(const Circuit& circ_) const; @@ -244,6 +269,42 @@ class Placement { Architecture arc_; }; +/** + * NaivePlacement class provides methods for relabelling any + * Qubit objects in some Circuit to Node objects in some Architecture + * given the constraint that only Qubits that are not already labelled + * as some Node can be relabelled, and only to Architecture Nodes + * that are not already in the Circuit.
+ */ +class NaivePlacement : public Placement { + public: + /** + * @param _arc Architecture object that later relabellings are produced for + */ + explicit NaivePlacement(const Architecture& _arc) { arc_ = _arc; } + /** + * Given some circuit, returns a map defining a relabelling of the + * Circuit's Qubit objects to Architecture Node objects. + * + * @param circ_ Circuit map relabelling is defined for + * + * @return Map defining relabelling for circuit Qubit objects + */ + qubit_mapping_t get_placement_map(const Circuit& circ_) const override; + + /** + * Given some circuit, returns a single relabelling map, wrapped + * in a vector. + * + * @param circ_ Circuit map relabelling is defined for + * + * @return Vector of a single Map defining relabelling for Circuit + * Qubit objects. + */ + std::vector<qubit_mapping_t> get_all_placement_maps( + const Circuit& circ_) const override; +}; + class LinePlacement : public Placement { public: explicit LinePlacement(const Architecture& _arc) { arc_ = _arc; } diff --git a/tket/src/Routing/subgraph_mapping.cpp b/tket/src/Placement/subgraph_mapping.cpp similarity index 99% rename from tket/src/Routing/subgraph_mapping.cpp rename to tket/src/Placement/subgraph_mapping.cpp index 1479e5de4b..e0d7c27406 100644 --- a/tket/src/Routing/subgraph_mapping.cpp +++ b/tket/src/Placement/subgraph_mapping.cpp @@ -19,7 +19,7 @@ #include "Architecture/Architecture.hpp" #include "Graphs/Utils.hpp" #include "Placement.hpp" -#include "Routing/Placement.hpp" +#include "Placement/Placement.hpp" #include "Utils/Assert.hpp" #include "Utils/GraphHeaders.hpp" #include "Utils/TketLog.hpp" diff --git a/tket/src/Predicates/CMakeLists.txt b/tket/src/Predicates/CMakeLists.txt index 0370356384..63b23ac917 100644 --- a/tket/src/Predicates/CMakeLists.txt +++ b/tket/src/Predicates/CMakeLists.txt @@ -34,10 +34,12 @@ list(APPEND DEPS_${COMP} Converters Gate Graphs + Mapping Ops OpType PauliGraph - Routing + Placement + TokenSwapping Transformations Utils) diff --git a/tket/src/Predicates/CompilationUnit.cpp b/tket/src/Predicates/CompilationUnit.cpp index aee04df967..e2a0f4948f 100644 --- a/tket/src/Predicates/CompilationUnit.cpp +++ b/tket/src/Predicates/CompilationUnit.cpp @@ -13,6 +13,10 @@ // limitations under the License.
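Returning briefly to the NeighbourPlacements class declared earlier in this diff (Placement/NeighbourPlacements.hpp): a hedged usage sketch, not part of the patch. The include path and the `arc`/`init_map` arguments are assumptions for illustration only.

// --- Illustrative sketch only; not part of the patch. ---
#include "Placement/NeighbourPlacements.hpp"

namespace tket {

void example_neighbour_placements(
    const Architecture& arc, const qubit_mapping_t& init_map) {
  NeighbourPlacements np(arc, init_map);
  // Request up to 3 distinct placements that are (ideally) exactly 2 swaps
  // away from init_map; swap-list optimisation and the seed use the defaults.
  NeighbourPlacements::ResultVec results = np.get(/*dist=*/2, /*n=*/3);
  for (const NeighbourPlacements::Result& res : results) {
    // res.map is the new Qubit->Node placement; res.swaps lists the node
    // swaps that transform init_map into it.
    (void)res;
  }
}

}  // namespace tket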
#include "CompilationUnit.hpp" + +#include + +#include "Utils/UnitID.hpp" namespace tket { CompilationUnit::CompilationUnit(const Circuit& circ) : circ_(circ) { @@ -91,13 +95,11 @@ void CompilationUnit::initialize_cache() const { } void CompilationUnit::initialize_maps() { - if (!initial_map_.empty()) - throw std::logic_error("Initial map must be empty to be initialized"); - if (!final_map_.empty()) - throw std::logic_error("Final map must be empty to be initialized"); + if (maps) throw std::logic_error("Maps already initialized"); + maps = std::make_shared(); for (const UnitID& u : circ_.all_units()) { - initial_map_.insert({u, u}); - final_map_.insert({u, u}); + maps->initial.insert({u, u}); + maps->final.insert({u, u}); } } diff --git a/tket/src/Predicates/CompilerPass.cpp b/tket/src/Predicates/CompilerPass.cpp index d0b97c24fe..50218aeb84 100644 --- a/tket/src/Predicates/CompilerPass.cpp +++ b/tket/src/Predicates/CompilerPass.cpp @@ -14,12 +14,16 @@ #include "CompilerPass.hpp" +#include + +#include "Mapping/RoutingMethodJson.hpp" #include "PassGenerators.hpp" #include "PassLibrary.hpp" #include "Transformations/ContextualReduction.hpp" #include "Transformations/PauliOptimisation.hpp" #include "Utils/Json.hpp" #include "Utils/TketLog.hpp" +#include "Utils/UnitID.hpp" namespace tket { @@ -194,9 +198,7 @@ bool StandardPass::apply( unsatisfied_precon.value() ->to_string()); // just raise warning in super-unsafe mode // Allow trans_ to update the initial and final map - c_unit.circ_.unit_bimaps_ = {&c_unit.initial_map_, &c_unit.final_map_}; - bool changed = trans_.apply(c_unit.circ_); - c_unit.circ_.unit_bimaps_ = {nullptr, nullptr}; + bool changed = trans_.apply_fn(c_unit.circ_, c_unit.maps); update_cache(c_unit, safe_mode); after_apply(c_unit, this->get_config()); return changed; @@ -431,10 +433,14 @@ void from_json(const nlohmann::json& j, PassPtr& pp) { pp = gen_euler_pass(q, p, s); } else if (passname == "RoutingPass") { Architecture arc = content.at("architecture").get(); - RoutingConfig con = content.at("routing_config").get(); + std::vector con = content.at("routing_config"); pp = gen_routing_pass(arc, con); + } else if (passname == "PlacementPass") { pp = gen_placement_pass(content.at("placement").get()); + } else if (passname == "NaivePlacementPass") { + pp = gen_naive_placement_pass( + content.at("architecture").get()); } else if (passname == "RenameQubitsPass") { pp = gen_rename_qubits_pass( content.at("qubit_map").get>()); @@ -486,17 +492,19 @@ void from_json(const nlohmann::json& j, PassPtr& pp) { // SEQUENCE PASS - DESERIALIZABLE ONLY Architecture arc = content.at("architecture").get(); PlacementPtr place = content.at("placement").get(); - RoutingConfig config = content.at("routing_config").get(); + std::vector config = content.at("routing_config"); + pp = gen_full_mapping_pass(arc, place, config); } else if (passname == "DefaultMappingPass") { // SEQUENCE PASS - DESERIALIZABLE ONLY Architecture arc = content.at("architecture").get(); - pp = gen_default_mapping_pass(arc); + bool delay_measures = content.at("delay_measures").get(); + pp = gen_default_mapping_pass(arc, delay_measures); } else if (passname == "CXMappingPass") { // SEQUENCE PASS - DESERIALIZABLE ONLY Architecture arc = content.at("architecture").get(); PlacementPtr place = content.at("placement").get(); - RoutingConfig config = content.at("routing_config").get(); + std::vector config = content.at("routing_config"); bool directed_cx = content.at("directed").get(); bool delay_measures = 
content.at("delay_measures").get(); pp = gen_cx_mapping_pass(arc, place, config, directed_cx, delay_measures); diff --git a/tket/src/Predicates/PassGenerators.cpp b/tket/src/Predicates/PassGenerators.cpp index 2be0e53305..23ee444480 100644 --- a/tket/src/Predicates/PassGenerators.cpp +++ b/tket/src/Predicates/PassGenerators.cpp @@ -14,15 +14,20 @@ #include "PassGenerators.hpp" +#include + #include "ArchAwareSynth/SteinerForest.hpp" #include "Circuit/CircPool.hpp" #include "Circuit/Circuit.hpp" #include "Converters/PhasePoly.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Placement/Placement.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassLibrary.hpp" #include "Predicates/Predicates.hpp" -#include "Routing/Placement.hpp" #include "Transformations/BasicOptimisation.hpp" #include "Transformations/ContextualReduction.hpp" #include "Transformations/Decomposition.hpp" @@ -36,16 +41,14 @@ namespace tket { PassPtr gen_rebase_pass( - const OpTypeSet& multiqs, const Circuit& cx_replacement, - const OpTypeSet& singleqs, + const OpTypeSet& allowed_gates, const Circuit& cx_replacement, const std::function& tk1_replacement) { Transform t = Transforms::rebase_factory( - multiqs, cx_replacement, singleqs, tk1_replacement); + allowed_gates, cx_replacement, tk1_replacement); PredicatePtrMap precons; - OpTypeSet all_types(singleqs); - all_types.insert(multiqs.begin(), multiqs.end()); + OpTypeSet all_types(allowed_gates); all_types.insert(OpType::Measure); all_types.insert(OpType::Collapse); all_types.insert(OpType::Reset); @@ -59,9 +62,8 @@ PassPtr gen_rebase_pass( // record pass config nlohmann::json j; j["name"] = "RebaseCustom"; - j["basis_multiqs"] = multiqs; + j["basis_allowed"] = allowed_gates; j["basis_cx_replacement"] = cx_replacement; - j["basis_singleqs"] = singleqs; j["basis_tk1_replacement"] = "SERIALIZATION OF FUNCTIONS IS NOT YET SUPPORTED"; return std::make_shared(precons, t, pc, j); @@ -126,7 +128,12 @@ PassPtr gen_clifford_simp_pass(bool allow_swaps) { } PassPtr gen_rename_qubits_pass(const std::map& qm) { - Transform t = Transform([=](Circuit& circ) { return circ.rename_units(qm); }); + Transform t = + Transform([=](Circuit& circ, std::shared_ptr maps) { + bool changed = circ.rename_units(qm); + changed |= update_maps(maps, qm, qm); + return changed; + }); PredicatePtrMap precons = {}; PostConditions postcons = { {}, @@ -141,18 +148,19 @@ PassPtr gen_rename_qubits_pass(const std::map& qm) { } PassPtr gen_placement_pass(const PlacementPtr& placement_ptr) { - Transform::Transformation trans = [=](Circuit& circ) { + Transform::Transformation trans = [=](Circuit& circ, + std::shared_ptr maps) { // Fall back to line placement if graph placement fails bool changed; try { - changed = placement_ptr->place(circ); + changed = placement_ptr->place(circ, maps); } catch (const std::runtime_error& e) { tket_log()->warn(fmt::format( "PlacementPass failed with message: {} Fall back to LinePlacement.", e.what())); PlacementPtr line_placement_ptr = std::make_shared( placement_ptr->get_architecture_ref()); - changed = line_placement_ptr->place(circ); + changed = line_placement_ptr->place(circ, maps); } return changed; }; @@ -175,24 +183,55 @@ PassPtr gen_placement_pass(const PlacementPtr& placement_ptr) { return std::make_shared(precons, t, pc, j); } +PassPtr gen_naive_placement_pass(const Architecture& arc) { + Transform::Transformation trans = [=](Circuit& circ, + 
std::shared_ptr maps) { + NaivePlacement np(arc); + return np.place(circ, maps); + }; + Transform t = Transform(trans); + PredicatePtr n_qubit_pred = + std::make_shared(arc.n_nodes()); + + PredicatePtrMap precons{CompilationUnit::make_type_pair(n_qubit_pred)}; + PredicatePtr placement_pred = std::make_shared(arc); + PredicatePtrMap s_postcons{CompilationUnit::make_type_pair(placement_pred)}; + PostConditions pc{s_postcons, {}, Guarantee::Preserve}; + // record pass config + nlohmann::json j; + j["name"] = "NaivePlacementPass"; + j["architecture"] = arc; + return std::make_shared(precons, t, pc, j); +} + PassPtr gen_full_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config) { - return gen_placement_pass(placement_ptr) >> gen_routing_pass(arc, config); + const std::vector& config) { + std::vector vpp = { + gen_placement_pass(placement_ptr), gen_routing_pass(arc, config), + gen_naive_placement_pass(arc)}; + return std::make_shared(vpp); } -PassPtr gen_default_mapping_pass(const Architecture& arc) { - PlacementPtr pp = std::make_shared(arc); - return gen_full_mapping_pass(arc, pp); +PassPtr gen_default_mapping_pass(const Architecture& arc, bool delay_measures) { + PassPtr return_pass = gen_full_mapping_pass( + arc, std::make_shared(arc), + {std::make_shared(), + std::make_shared()}); + if (delay_measures) { + return_pass = return_pass >> DelayMeasures(); + } + return return_pass; } PassPtr gen_cx_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config, bool directed_cx, bool delay_measures) { - PassPtr rebase_pass = gen_rebase_pass( - {OpType::CX}, CircPool::CX(), all_single_qubit_types(), - Transforms::tk1_to_tk1); - + const std::vector& config, bool directed_cx, + bool delay_measures) { + OpTypeSet gate_set = all_single_qubit_types(); + gate_set.insert(OpType::CX); + PassPtr rebase_pass = + gen_rebase_pass(gate_set, CircPool::CX(), CircPool::tk1_to_tk1); PassPtr return_pass = rebase_pass >> gen_full_mapping_pass(arc, placement_ptr, config); if (delay_measures) return_pass = return_pass >> DelayMeasures(); @@ -201,15 +240,13 @@ PassPtr gen_cx_mapping_pass( return return_pass; } -PassPtr gen_routing_pass(const Architecture& arc, const RoutingConfig& config) { - Transform::Transformation trans = - [=](Circuit& circ) { // this doesn't work if capture by ref for some - // reason.... 
- Routing route(circ, arc); - std::pair circbool = route.solve(config); - circ = circbool.first; - return circbool.second; - }; +PassPtr gen_routing_pass( + const Architecture& arc, const std::vector& config) { + Transform::Transformation trans = [=](Circuit& circ, + std::shared_ptr maps) { + MappingManager mm(std::make_shared(arc)); + return mm.route_circuit_with_maps(circ, config, maps); + }; Transform t = Transform(trans); PredicatePtr twoqbpred = std::make_shared(); @@ -245,7 +282,8 @@ PassPtr gen_routing_pass(const Architecture& arc, const RoutingConfig& config) { } PassPtr gen_placement_pass_phase_poly(const Architecture& arc) { - Transform::Transformation trans = [=](Circuit& circ) { + Transform::Transformation trans = [=](Circuit& circ, + std::shared_ptr maps) { if (arc.n_nodes() < circ.n_qubits()) { throw CircuitInvalidity( "Circuit has more qubits than the architecture has nodes."); @@ -263,18 +301,24 @@ PassPtr gen_placement_pass_phase_poly(const Architecture& arc) { } circ.rename_units(qubit_to_nodes); + update_maps(maps, qubit_to_nodes, qubit_to_nodes); return true; }; Transform t = Transform(trans); - PredicatePtrMap precons{}; + PredicatePtr no_wire_swap = std::make_shared(); + PredicatePtrMap precons{CompilationUnit::make_type_pair(no_wire_swap)}; + PredicatePtr placement_pred = std::make_shared(arc); PredicatePtr n_qubit_pred = std::make_shared(arc.n_nodes()); + PredicatePtrMap s_postcons{ CompilationUnit::make_type_pair(placement_pred), - CompilationUnit::make_type_pair(n_qubit_pred)}; + CompilationUnit::make_type_pair(n_qubit_pred), + CompilationUnit::make_type_pair(no_wire_swap)}; + PostConditions pc{s_postcons, {}, Guarantee::Preserve}; // record pass config nlohmann::json j; @@ -287,7 +331,7 @@ PassPtr gen_placement_pass_phase_poly(const Architecture& arc) { PassPtr aas_routing_pass( const Architecture& arc, const unsigned lookahead, const aas::CNotSynthType cnotsynthtype) { - Transform::Transformation trans = [=](Circuit& circ) { + Transform::SimpleTransformation trans = [=](Circuit& circ) { // check input: if (lookahead == 0) { throw std::logic_error("lookahead must be > 0"); @@ -297,6 +341,10 @@ PassPtr aas_routing_pass( "Circuit has more qubits than the architecture has nodes."); } + // this pass is not able to handle implicit wire swaps + // this is additionally assured by a precondition of this pass + TKET_ASSERT(!circ.has_implicit_wireswaps()); + qubit_vector_t all_qu = circ.all_qubits(); Circuit input_circ = circ; @@ -379,9 +427,12 @@ PassPtr aas_routing_pass( PredicatePtr placedpred = std::make_shared(arc); PredicatePtr n_qubit_pred = std::make_shared(arc.n_nodes()); + PredicatePtr no_wire_swap = std::make_shared(); + PredicatePtrMap precons{ CompilationUnit::make_type_pair(placedpred), - CompilationUnit::make_type_pair(n_qubit_pred)}; + CompilationUnit::make_type_pair(n_qubit_pred), + CompilationUnit::make_type_pair(no_wire_swap)}; PredicatePtr postcon1 = std::make_shared(arc); std::pair pair1 = @@ -409,12 +460,13 @@ PassPtr gen_full_mapping_pass_phase_poly( } PassPtr gen_directed_cx_routing_pass( - const Architecture& arc, const RoutingConfig& config) { + const Architecture& arc, const std::vector& config) { OpTypeSet multis = {OpType::CX, OpType::BRIDGE, OpType::SWAP}; + OpTypeSet gate_set = all_single_qubit_types(); + gate_set.insert(multis.begin(), multis.end()); + return gen_routing_pass(arc, config) >> - gen_rebase_pass( - multis, CircPool::CX(), all_single_qubit_types(), - Transforms::tk1_to_tk1) >> + gen_rebase_pass(gate_set, CircPool::CX(), 
CircPool::tk1_to_tk1) >> gen_decompose_routing_gates_to_cxs_pass(arc, true); } diff --git a/tket/src/Predicates/PassLibrary.cpp b/tket/src/Predicates/PassLibrary.cpp index 7abb46727e..fc1f0d95d1 100644 --- a/tket/src/Predicates/PassLibrary.cpp +++ b/tket/src/Predicates/PassLibrary.cpp @@ -16,6 +16,7 @@ #include +#include "Circuit/CircPool.hpp" #include "PassGenerators.hpp" #include "Predicates/CompilerPass.hpp" #include "Transformations/BasicOptimisation.hpp" @@ -285,6 +286,8 @@ const PassPtr &ComposePhasePolyBoxes() { * converts a circuit containing all possible gates to a circuit * containing only phase poly boxes + H gates (and measure + reset + collapse * + barrier) + * this pass will replace all wire swaps in the given circuit and they will be + * included in the last or an additional phase poly boxes */ static const PassPtr pp([]() { Transform t = @@ -293,7 +296,13 @@ const PassPtr &ComposePhasePolyBoxes() { PredicatePtrMap precons{CompilationUnit::make_type_pair(noclas)}; - PostConditions postcon = {precons, {}, Guarantee::Clear}; + PredicatePtr no_wire_swap = std::make_shared(); + + PredicatePtrMap s_postcons{ + CompilationUnit::make_type_pair(noclas), + CompilationUnit::make_type_pair(no_wire_swap)}; + PostConditions postcon{s_postcons, {}, Guarantee::Preserve}; + nlohmann::json j; j["name"] = "ComposePhasePolyBoxes"; return std::make_shared(precons, t, postcon, j); @@ -346,7 +355,7 @@ const PassPtr &SquashTK1() { const PassPtr &SquashHQS() { static const PassPtr pp([]() { return gen_squash_pass( - {OpType::Rz, OpType::PhasedX}, Transforms::tk1_to_PhasedXRz); + {OpType::Rz, OpType::PhasedX}, CircPool::tk1_to_PhasedXRz); }()); return pp; } @@ -368,11 +377,13 @@ const PassPtr &DecomposeBridges() { const PassPtr &FlattenRegisters() { static const PassPtr pp([]() { - Transform t = Transform([](Circuit &circ) { - if (circ.is_simple()) return false; - circ.flatten_registers(); - return true; - }); + Transform t = + Transform([](Circuit &circ, std::shared_ptr maps) { + if (circ.is_simple()) return false; + unit_map_t qmap = circ.flatten_registers(); + update_maps(maps, qmap, qmap); + return true; + }); PredicatePtrMap s_ps; PredicatePtr simple = std::make_shared(); PredicatePtrMap spec_postcons{CompilationUnit::make_type_pair(simple)}; diff --git a/tket/src/Predicates/Predicates.cpp b/tket/src/Predicates/Predicates.cpp index 8e10cd72d1..a2652bcdc8 100644 --- a/tket/src/Predicates/Predicates.cpp +++ b/tket/src/Predicates/Predicates.cpp @@ -15,7 +15,9 @@ #include "Predicates.hpp" #include "Gate/Gate.hpp" -#include "Routing/Verification.hpp" +#include "Mapping/Verification.hpp" +#include "Placement/Placement.hpp" +#include "Utils/UnitID.hpp" namespace tket { @@ -331,6 +333,12 @@ bool ConnectivityPredicate::implies(const Predicate& other) const { dynamic_cast(other); const Architecture& arc1 = arch_; const Architecture& arc2 = other_c.arch_; + // Check that all nodes in arc1 are in arc2: + for (const Node& n : arc1.get_all_nodes_vec()) { + if (!arc2.node_exists(n)) { + return false; + } + } // Collect all edges in arc1 for (auto [n1, n2] : arc1.get_all_edges_vec()) { if (!arc2.edge_exists(n1, n2) && !arc2.edge_exists(n2, n1)) { diff --git a/tket/src/Predicates/include/Predicates/CompilationUnit.hpp b/tket/src/Predicates/include/Predicates/CompilationUnit.hpp index f129ef4c6d..99a759b366 100644 --- a/tket/src/Predicates/include/Predicates/CompilationUnit.hpp +++ b/tket/src/Predicates/include/Predicates/CompilationUnit.hpp @@ -14,6 +14,8 @@ #pragma once +#include + #include "Predicates.hpp" 
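The reworked pass generators above (PassGenerators.cpp) are easiest to see end to end with a small sketch, which is not part of the patch; the include paths are assumptions and `circ`/`arc` are placeholders.

// --- Illustrative sketch only; not part of the patch. ---
#include "Predicates/CompilationUnit.hpp"
#include "Predicates/PassGenerators.hpp"

namespace tket {

void example_default_mapping(Circuit& circ, const Architecture& arc) {
  CompilationUnit cu(circ);
  // Placement, routing and naive placement of any leftover qubits; the
  // DelayMeasures pass is appended because delay_measures defaults to true.
  PassPtr mapping_pass = gen_default_mapping_pass(arc, /*delay_measures=*/true);
  mapping_pass->apply(cu);
  // The qubit tracking introduced in this patch is exposed via the maps:
  const unit_bimap_t& initial = cu.get_initial_map_ref();
  const unit_bimap_t& final_map = cu.get_final_map_ref();
  (void)initial;
  (void)final_map;
}

}  // namespace tket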
namespace tket { @@ -42,8 +44,8 @@ class CompilationUnit { /* getters to inspect the data members */ const Circuit& get_circ_ref() const { return circ_; } const PredicateCache& get_cache_ref() const { return cache_; } - const unit_bimap_t& get_initial_map_ref() const { return initial_map_; } - const unit_bimap_t& get_final_map_ref() const { return final_map_; } + const unit_bimap_t& get_initial_map_ref() const { return maps->initial; } + const unit_bimap_t& get_final_map_ref() const { return maps->final; } std::string to_string() const; friend class Circuit; @@ -62,12 +64,8 @@ class CompilationUnit { // satisfy by the end of your Compiler Passes mutable PredicateCache cache_; // updated continuously - /** Map from original logical qubits to corresponding current qubits wtr - * inputs */ - unit_bimap_t initial_map_; - /** Map from original logical qubits to corresponding current qubits wtr - * outputs */ - unit_bimap_t final_map_; + // Maps from original logical qubits to corresponding current qubits + std::shared_ptr maps; }; } // namespace tket diff --git a/tket/src/Predicates/include/Predicates/CompilerPass.hpp b/tket/src/Predicates/include/Predicates/CompilerPass.hpp index 89bef2e497..31bf9f0056 100644 --- a/tket/src/Predicates/include/Predicates/CompilerPass.hpp +++ b/tket/src/Predicates/include/Predicates/CompilerPass.hpp @@ -65,6 +65,13 @@ struct PostConditions { PredicatePtrMap specific_postcons_; PredicateClassGuarantees generic_postcons_; Guarantee default_postcon_; + PostConditions( + const PredicatePtrMap& specific_postcons = {}, + const PredicateClassGuarantees& generic_postcons = {}, + Guarantee default_postcon = Guarantee::Clear) + : specific_postcons_(specific_postcons), + generic_postcons_(generic_postcons), + default_postcon_(default_postcon) {} }; /** diff --git a/tket/src/Predicates/include/Predicates/PassGenerators.hpp b/tket/src/Predicates/include/Predicates/PassGenerators.hpp index 36e873be56..3bccaa3ea0 100644 --- a/tket/src/Predicates/include/Predicates/PassGenerators.hpp +++ b/tket/src/Predicates/include/Predicates/PassGenerators.hpp @@ -16,6 +16,8 @@ #include "ArchAwareSynth/SteinerForest.hpp" #include "CompilerPass.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" #include "Transformations/ContextualReduction.hpp" #include "Transformations/PauliOptimisation.hpp" @@ -23,8 +25,7 @@ namespace tket { /* a wrapper method for the rebase_factory in Transforms */ PassPtr gen_rebase_pass( - const OpTypeSet& multiqs, const Circuit& cx_replacement, - const OpTypeSet& singleqs, + const OpTypeSet& allowed_gates, const Circuit& cx_replacement, const std::function& tk1_replacement); @@ -44,23 +45,28 @@ PassPtr gen_clifford_simp_pass(bool allow_swaps = true); PassPtr gen_rename_qubits_pass(const std::map& qm); PassPtr gen_placement_pass(const PlacementPtr& placement_ptr); + +PassPtr gen_naive_placement_pass(const Architecture& arc); /* This higher order function generates a Routing pass using the -RoutingConfig object */ +std::vector object */ PassPtr gen_full_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config = {}); -PassPtr gen_default_mapping_pass(const Architecture& arc); + const std::vector& config); +PassPtr gen_default_mapping_pass( + const Architecture& arc, bool delay_measures = true); PassPtr gen_cx_mapping_pass( const Architecture& arc, const PlacementPtr& placement_ptr, - const RoutingConfig& config, bool directed_cx, bool delay_measures); + const std::vector& config, bool directed_cx, + 
bool delay_measures); PassPtr gen_routing_pass( - const Architecture& arc, const RoutingConfig& config = {}); + const Architecture& arc, const std::vector& config); PassPtr gen_directed_cx_routing_pass( - const Architecture& arc, const RoutingConfig& config = {}); + const Architecture& arc, const std::vector& config); /** * execute architecture aware synthesis on a given architecture for an allready * place circuit, only for circuit which contains Cx+Rz+H gates + * this pass is not able to handle implicit wire swaps * @param arc architecture to route on * @param lookahead parameter for the recursion depth in the algorithm, the * value should be > 0 diff --git a/tket/src/Predicates/include/Predicates/Predicates.hpp b/tket/src/Predicates/include/Predicates/Predicates.hpp index 6eb3f78aad..e7ea785701 100644 --- a/tket/src/Predicates/include/Predicates/Predicates.hpp +++ b/tket/src/Predicates/include/Predicates/Predicates.hpp @@ -15,7 +15,7 @@ #pragma once #include -#include "Routing/Routing.hpp" +#include "Architecture/Architecture.hpp" #include "Transformations/Transform.hpp" namespace tket { diff --git a/tket/src/Program/Program_iteration.cpp b/tket/src/Program/Program_iteration.cpp index 634c3429b9..af10b97eff 100644 --- a/tket/src/Program/Program_iteration.cpp +++ b/tket/src/Program/Program_iteration.cpp @@ -18,7 +18,8 @@ namespace tket { Program::BlockIterator::BlockIterator() - : current_vert_(boost::graph_traits::null_vertex()) {} + : prog_(nullptr), + current_vert_(boost::graph_traits::null_vertex()) {} Program::BlockIterator::BlockIterator(const Program &p) { FGVert first = p.get_successors(p.entry_).front(); diff --git a/tket/src/Routing/Board_Analysis.cpp b/tket/src/Routing/Board_Analysis.cpp deleted file mode 100644 index d3cb3ea0a5..0000000000 --- a/tket/src/Routing/Board_Analysis.cpp +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Routing.hpp" - -namespace tket { - -bool node_active(const qubit_bimap_t& map, Node node) { - const bool found = map.right.find(node) != map.right.end(); - return found; -} - -Node Routing::find_best_inactive_node( - const Node& target_node, const Architecture& arc) const { - const unsigned diameter = arc.get_diameter(); - for (unsigned k = 1; k <= diameter; k++) { - std::vector potential_nodes = arc.nodes_at_distance(target_node, k); - for (Node potential : potential_nodes) { - if (!node_active(qmap, potential)) { - return potential; - } - } - } - throw ArchitectureFull(); // gotta hope you never get here... 
-} - -void Routing::activate_node(const Node& node) { - current_arc_.add_node(node); - for (Node neigh : original_arc_.get_neighbour_nodes(node)) { - if (node_active(qmap, neigh)) { - if (original_arc_.edge_exists(node, neigh)) { - current_arc_.add_connection(node, neigh); - } - if (original_arc_.edge_exists(neigh, node)) { - current_arc_.add_connection(neigh, node); - } - } - } -} - -void Routing::reactivate_qubit(const Qubit& qb, const Qubit& target) { - // finds 'best' available node - Node node = find_best_inactive_node(qmap.left.at(target), original_arc_); - - // updates qmap and initial maps to reflect this qb being at that node - activate_node(node); - std::pair new_in = {qb, node}; - qmap.left.insert(new_in); - init_map.left.insert(new_in); -} - -} // namespace tket diff --git a/tket/src/Routing/Routing.cpp b/tket/src/Routing/Routing.cpp deleted file mode 100644 index f48ac59396..0000000000 --- a/tket/src/Routing/Routing.cpp +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Routing.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "Utils/HelperFunctions.hpp" -#include "Utils/Json.hpp" - -namespace tket { - -bool RoutingConfig::operator==(const RoutingConfig& other) const { - return (this->depth_limit == other.depth_limit) && - (this->distrib_limit == other.distrib_limit) && - (this->interactions_limit == other.interactions_limit) && - (this->distrib_exponent == other.distrib_exponent); -} - -// If unit map is same pre and both routing, then the same placement procedure -// has happened in both cases, and routing is deterministic (!!) 
so same -// SWAP/Bridges added assuming same config -bool Routing::circuit_modified() const { - if (route_stats.swap_count > 0) return true; - if (route_stats.bridge_count > 0) return true; - if (circ_.boundary != original_boundary) return true; - return false; -} - -/* Class Constructor */ -Routing::Routing(const Circuit& _circ, const Architecture& _arc) - : circ_(_circ), slice_frontier_(circ_), original_arc_(_arc) { - circ_.unit_bimaps_ = _circ.unit_bimaps_; - original_boundary = circ_.boundary; - - current_arc_ = original_arc_; - // Checks for circuit and architecture compatibility - if (circ_.n_qubits() > current_arc_.n_nodes() || current_arc_.n_nodes() < 1) { - throw ArchitectureMismatch(circ_.n_qubits(), current_arc_.n_nodes()); - } - - // Information for placement & running routing with subgraph of architecture - // Initial nodes number - - // Track which nodes are actually active - for (const UnitID& uid : current_arc_.nodes()) { - Node n(uid); - interaction.insert({n, n}); - } -} - -void to_json(nlohmann::json& j, const RoutingConfig& config) { - j["depth_limit"] = config.depth_limit; - j["distrib_limit"] = config.distrib_limit; - j["interactions_limit"] = config.interactions_limit; - j["distrib_exponent"] = config.distrib_exponent; -} - -void from_json(const nlohmann::json& j, RoutingConfig& config) { - config.depth_limit = j.at("depth_limit").get(); - config.distrib_limit = j.at("distrib_limit").get(); - config.interactions_limit = j.at("interactions_limit").get(); - config.distrib_exponent = j.at("distrib_exponent").get(); -} - -std::vector Routing::get_active_nodes() const { - node_vector_t ret; - ret.reserve(qmap.size()); - for (auto [qb, n] : qmap.left) { - ret.push_back(n); - } - return ret; -} - -qubit_mapping_t Routing::return_final_map() const { - return bimap_to_map(final_map.left); -} - -qubit_mapping_t Routing::return_initial_map() const { - return bimap_to_map(init_map.left); -} - -bool subgraph_remove_if_connected( - Architecture& arc, const Architecture& subarc, const Node& node) { - // do not remove if node is in subarc - if (subarc.node_exists(node)) { - return false; - } - if (subarc.n_nodes() > 0) { - node_set_t ap = arc.get_articulation_points(subarc); - - if (ap.find(node) != ap.end()) { - return false; - } - } - - arc.remove_node(node); - return true; -} - -void remove_unmapped_nodes( - Architecture& arc, qubit_bimap_t& map, Circuit& circ) { - std::vector unmapped_nodes; - std::vector mapped_nodes; - - r_const_iterator_t iend = map.right.end(); - for (const UnitID& uid : arc.nodes()) { - Node n(uid); - r_const_iterator_t find_node = map.right.find(n); - if (find_node == iend) { - unmapped_nodes.push_back(n); - } else { - mapped_nodes.push_back(n); - } - } - Architecture subarc = arc.create_subarch(mapped_nodes); - - // sort mapped nodes from least connected to most (remove least connected - // first) - std::sort( - unmapped_nodes.begin(), unmapped_nodes.end(), [&arc](Node x, Node y) { - return (arc.get_out_degree(x) < arc.get_out_degree(y)); - }); - - qubit_vector_t available; - for (const Qubit& q : circ.all_qubits()) { - if (map.left.find(q) == map.left.end()) { - available.push_back(q); - } - } - - for (const Node& node : unmapped_nodes) { - if (!subgraph_remove_if_connected(arc, subarc, node)) { - // if node can't be removed, map to first unmapped qubit - if (available.empty()) - throw CircuitInvalidity( - "Routing is unable to construct connected placement from partial " - "placement using unplaced logical qubits. 
Please update the " - "circuit placement to a set of connected physical qubits."); - map.insert({available.front(), node}); - available.erase(available.begin()); - } - } -} - -qubit_mapping_t get_qmap_from_circuit(Architecture& arc, Circuit& circ) { - qubit_vector_t all_qbs = circ.all_qubits(); - node_set_t all_nodes = arc.nodes(); - - qubit_mapping_t qubit_map; - for (Qubit q : all_qbs) { - Node n(q); - if (all_nodes.find(n) != all_nodes.end()) { - qubit_map.insert({q, n}); - } - } - return qubit_map; -} - -std::pair Routing::solve(const RoutingConfig& config) { - config_ = config; - qubit_mapping_t qubit_map = get_qmap_from_circuit(current_arc_, circ_); - slice_frontier_.init(); - if (slice_frontier_.slice->empty()) { - organise_registers_and_maps(); - } else { - // Some nodes are permanently unused due to difference in architecture nodes - // and number of used wires in circuit To account for this, place highest - // numbered wires (i.e. unused) into set bad nodes of architecture - - // Placement method attempts to find a good initial allocation of qubits to - // nodes, aiming to reduce overall circuit depth. The method aims to put - // intreacting qubits in the first few circuit timesteps on adjacent nodes - // If no placement, qubits placed sequentially on nodes i.e. qubit 0 -> node - // 0 etc. - - if (qubit_map.size() != 0) { - init_map.left.insert(qubit_map.begin(), qubit_map.end()); - } - remove_unmapped_nodes(current_arc_, init_map, circ_); - final_map = remap(init_map); - organise_registers_and_maps(); - } - bool modified = circuit_modified(); - return {circ_, modified}; -} - -// Tidying up of qregisters and initial and final maps after SWAP adding. -void Routing::organise_registers_and_maps() { - // Given all the new empty wires with no home, if a qubit isnt in the initial - // map, find it an unassigned node and shove it there. - auto all_nodes = original_arc_.get_all_nodes_vec(); - unsigned next_ind = 0; - Node next_node = all_nodes[next_ind]; - - for (const Qubit& qb : circ_.all_qubits()) { - if (init_map.left.find(qb) == init_map.left.end()) { - // find next free node - while (init_map.right.count(next_node)) { - next_node = all_nodes[++next_ind]; - if (next_ind == all_nodes.size()) { - throw ArchitectureMismatch(circ_.n_qubits(), current_arc_.n_nodes()); - } - } - init_map.left.insert({qb, next_node}); - final_map.left.insert({qb, next_node}); - } - } - - // Due to the addition of SWAP gates, a qubit path may change, and so it's - // output boundary ordering may not match the input boundary ordering. The - // following updates the output boundary to match the ordering of the final - // slice frontier Make the input boundary match up to node numbering of - // architecture. 
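The pruning above (remove_unmapped_nodes together with subgraph_remove_if_connected) discards architecture nodes that carry no qubit, least-connected first, but never drops a node whose removal would disconnect the part of the architecture still in use. A minimal standalone sketch of that admissibility check, assuming a plain adjacency map and a BFS in place of tket::Architecture and its articulation-point query (all names here are illustrative):

#include <map>
#include <queue>
#include <set>
#include <utility>

using Graph = std::map<int, std::set<int>>;  // undirected adjacency, illustrative only

static bool is_connected(const Graph& g) {
  if (g.empty()) return true;
  std::set<int> seen{g.begin()->first};
  std::queue<int> frontier;
  frontier.push(g.begin()->first);
  while (!frontier.empty()) {
    const int v = frontier.front();
    frontier.pop();
    for (int w : g.at(v)) {
      if (seen.insert(w).second) frontier.push(w);
    }
  }
  return seen.size() == g.size();
}

// Remove `node` only if the remaining graph stays connected; returns whether it
// was removed, mirroring the bool returned by subgraph_remove_if_connected.
static bool remove_if_still_connected(Graph& g, int node) {
  Graph trial = g;
  for (int w : trial.at(node)) trial.at(w).erase(node);
  trial.erase(node);
  if (!is_connected(trial)) return false;
  g = std::move(trial);
  return true;
}

int main() {
  Graph g{{0, {1}}, {1, {0, 2}}, {2, {1}}};                   // the line 0-1-2
  const bool kept_middle = !remove_if_still_connected(g, 1);  // 1 is an articulation point
  const bool dropped_end = remove_if_still_connected(g, 2);   // leaf nodes can go
  return (kept_middle && dropped_end) ? 0 : 1;
}

The real implementation avoids the copy-and-retest by querying the articulation points of the mapped subarchitecture, which encodes a similar condition without rebuilding the graph.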
- boundary_t new_boundary; - qubit_mapping_t reorder_map = bimap_to_map(init_map.left); - for (const std::pair& map : reorder_map) { - Qubit target = final_map.right.at(map.second); - new_boundary.insert( - {map.second, circ_.get_in(map.first), circ_.get_out(target)}); - // Which makes it all nicer - } - // add classical bits to new_boundary - for (auto [it, end] = - circ_.boundary.get().equal_range(UnitType::Bit); - it != end; it++) { - new_boundary.insert(*it); - } - - circ_.boundary = new_boundary; - circ_.update_initial_map(reorder_map); - circ_.update_final_map(bimap_to_map(final_map.left)); -} - -// Remap completes the routing algorithm -// slices passed as copy as 3 pass placement needs original preserved -qubit_bimap_t Routing::remap(const qubit_bimap_t& init) { - qmap = init; - - advance_frontier(); - // The routing algorithm: - // 1) Slices of circuit are parallelised/packed/whatever into 'timesteps' - // 2) Swaps are 'proposed' on edges connected to any nodes housing an - // 'interacting' qubit (interacting -> qubit is in some two qubit interaction - // in timestep 0) 3) A distance heuristic is used to determine whether the - // swap proposed will bring interacting qubits closer 4) If a swap is bring - // interacting qubits together it is compared to a held 'best swap'. The - // comparison is achieved by applying the same distance heuristic over future - // timesteps, until one is deemed strictly better. 5) If a succesful swap is - // found (from 3)), the swap gate is added to the circuit, information on - // which nodes home which qubits is updated and 1)->4) is repeated. 6) If no - // succesful swap is found, Dijkstra's algorithm is used to find a path in - // the graph between two interacting qubits, which the qubits are then swapped - // along. - // ... The pair of interacting qubits in the first timestep with greatest path - // distance between them is chosen. Algorithm then repeats 1)->4). - // for(unsigned count=0;slice_frontier_.slice.size()!=0 && count<2;count++){ - while (!slice_frontier_.slice->empty()) { - SwapResults single_swap = try_all_swaps(current_arc_.get_all_edges_vec()); - if (single_swap.success) { - route_stats.n_try_all_swaps++; - perform_action(single_swap.swap); - } else { - route_stats.n_solve_furthest++; - if (!solve_furthest()) { - throw RoutingFailure(); - } - } - advance_frontier(); - } - - qubit_bimap_t final_qmap; - for (l_const_iterator_t it = qmap.left.begin(); it != qmap.left.end(); ++it) { - Edge e = slice_frontier_.quantum_out_edges->get() - .find(it->first) - ->second; - Vertex v = circ_.target(e); - while (!circ_.detect_final_Op(v)) { - e = circ_.get_next_edge(v, e); - v = circ_.target(e); - } - Qubit out_q(circ_.get_id_from_out(v)); - final_qmap.insert({out_q, it->second}); - } - - return final_qmap; -} - -} // namespace tket diff --git a/tket/src/Routing/Slice_Manipulation.cpp b/tket/src/Routing/Slice_Manipulation.cpp deleted file mode 100644 index 597d6f8d7b..0000000000 --- a/tket/src/Routing/Slice_Manipulation.cpp +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include - -#include "Routing.hpp" - -namespace tket { - -RoutingFrontier::RoutingFrontier(const Circuit& _circ) : circ(_circ) { init(); } -void RoutingFrontier::init() { - VertexVec input_slice; - quantum_in_edges = std::make_shared(); - classical_in_edges = std::make_shared(); - - for (const Qubit& qb : circ.all_qubits()) { - Vertex input = circ.get_in(qb); - input_slice.push_back(input); - Edge candidate = circ.get_nth_out_edge(input, 0); - quantum_in_edges->insert({qb, circ.skip_irrelevant_edges(candidate)}); - } - for (const Bit& bit : circ.all_bits()) { - Vertex input = circ.get_in(bit); - EdgeVec candidates = circ.get_nth_b_out_bundle(input, 0); - classical_in_edges->insert({bit, candidates}); - } - - CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); - slice = next_cut.slice; - quantum_out_edges = next_cut.u_frontier; -} - -void RoutingFrontier::next_slicefrontier() { - quantum_in_edges = std::make_shared(); - classical_in_edges = std::make_shared(); - for (const std::pair& pair : quantum_out_edges->get()) { - Edge new_e = circ.skip_irrelevant_edges(pair.second); - quantum_in_edges->insert({pair.first, new_e}); - Vertex targ = circ.target(new_e); - EdgeVec targ_classical_ins = - circ.get_in_edges_of_type(targ, EdgeType::Boolean); - classical_in_edges->insert( - {Bit("frontier_bit", pair.first.index()), targ_classical_ins}); - } - - CutFrontier next_cut = circ.next_cut(quantum_in_edges, classical_in_edges); - slice = next_cut.slice; - quantum_out_edges = next_cut.u_frontier; -} - -std::vector Routing::nodes_from_qubits(const qubit_vector_t& qubs) { - std::vector nodes; - unsigned start = 0; - if (qmap.empty()) { - Node node0 = *(original_arc_.max_degree_nodes().begin()); - activate_node(node0); - qmap.left.insert({qubs[0], node0}); - init_map.left.insert({qubs[0], node0}); - nodes.push_back(node0); - start++; - } - - for (unsigned i = start; i < qubs.size(); i++) { - l_const_iterator_t node_find = qmap.left.find(qubs[i]); - if (node_find == qmap.left.end()) { - if (i < qubs.size() - 1 && - qmap.left.find(qubs[i + 1]) != - qmap.left.end()) { // TODO: Could this if condition cause some - // nasty non determinism? 
- reactivate_qubit(qubs[i], qubs[i + 1]); - nodes.push_back(qmap.left.at(qubs[i])); - } else { - if (i != 0) { - reactivate_qubit(qubs[i], qubs[0]); - nodes.push_back(qmap.left.at(qubs[i])); - } else { - reactivate_qubit(qubs[i], qmap.begin()->left); - nodes.push_back(qmap.left.at(qubs[i])); - } - } - } else { - nodes.push_back(node_find->second); - } - } - return nodes; -} - -/* -Advances slice frontier past any two_qubit operations on adjacent nodes -*/ -bool Routing::advance_frontier() { - bool found_adjacent_op = true; - while (found_adjacent_op && !slice_frontier_.slice->empty()) { - found_adjacent_op = false; - for (const Vertex& vert : *slice_frontier_.slice) { - qubit_vector_t qubs; - for (const Edge& q_out : - circ_.get_out_edges_of_type(vert, EdgeType::Quantum)) { - for (const std::pair& pair : - slice_frontier_.quantum_out_edges->get()) { - if (pair.second == q_out) { - qubs.push_back(Qubit(pair.first)); - break; - } - } - } - // Find OpType. If OpType is a Conditional, unpack to find vertex inside. - // If it's nested, this will fail. - OpType vert_type = circ_.get_OpType_from_Vertex(vert); - if (vert_type == OpType::Conditional) { - const Conditional& b = static_cast( - *circ_.get_Op_ptr_from_Vertex(vert)); - vert_type = b.get_op()->get_type(); - } - - // the vertex must be two qubits or a bridge, which we can skip past - - if (qubs.size() != 2 && vert_type != OpType::BRIDGE && - vert_type != OpType::Barrier) { - throw(CircuitInvalidity( - "Vertex has " + std::to_string(qubs.size()) + - " qubits, expected 2.")); - } - // BRIDGE gates are guaranteed to be across 3 adjacent nodes, - // already mapped so can just be read directly from the qmap - // otherwise, qubits may need to be activated first - std::vector nods = nodes_from_qubits(qubs); - - bool all_qbs_adjacent = true; - for (unsigned i = 0; i < nods.size() - 1; i++) { - all_qbs_adjacent &= - (current_arc_.get_distance(nods[i], nods[i + 1]) == 1); - } - if (all_qbs_adjacent || - vert_type == OpType::Barrier) { // if by eachother - found_adjacent_op = true; // i.e. 
at least one 2qb gate has - // been able to run - // for all qubits skip subsequent single qubit vertices to move - // in edges to be prior to next multiqubit vertex - for (const Qubit& qub : qubs) { - Edge new_e = circ_.skip_irrelevant_edges( - slice_frontier_.quantum_out_edges->find(qub)->second); - slice_frontier_.quantum_in_edges->replace( - slice_frontier_.quantum_in_edges->find(qub), {qub, new_e}); - Vertex targ = circ_.target(new_e); - EdgeVec targ_classical_ins = - circ_.get_in_edges_of_type(targ, EdgeType::Boolean); - Bit b("frontier_bit", qub.index()); - if (slice_frontier_.classical_in_edges->find(b) == - slice_frontier_.classical_in_edges->end()) { - slice_frontier_.classical_in_edges->insert({b, targ_classical_ins}); - } else { - slice_frontier_.classical_in_edges->replace( - slice_frontier_.classical_in_edges->find(b), - {b, targ_classical_ins}); - } - } - } - } - if (found_adjacent_op) { - CutFrontier next_cut = circ_.next_cut( - slice_frontier_.quantum_in_edges, slice_frontier_.classical_in_edges); - slice_frontier_.slice = next_cut.slice; - slice_frontier_.quantum_out_edges = next_cut.u_frontier; - slice_frontier_.classical_in_edges = std::make_shared(); - for (const std::pair& pair : - slice_frontier_.quantum_in_edges->get()) { - Vertex targ = circ_.target(pair.second); - EdgeVec targ_classical_ins = - circ_.get_in_edges_of_type(targ, EdgeType::Boolean); - Bit b("frontier_bit", pair.first.index()); - slice_frontier_.classical_in_edges->insert({b, targ_classical_ins}); - } - } - } - - interaction = generate_interaction_frontier(slice_frontier_); // reset - dist_vector = generate_distance_vector(interaction); - return found_adjacent_op; -} - -Interactions Routing::generate_interaction_frontier( - const RoutingFrontier& slice_front) { - Interactions inter; - for (const UnitID& uid : current_arc_.nodes()) { - Node n(uid); - inter.insert({n, n}); - } - for (const Vertex& vert : *slice_front.slice) { - qubit_vector_t qubs; - for (const Edge& q_out : - circ_.get_out_edges_of_type(vert, EdgeType::Quantum)) { - for (const std::pair& pair : - slice_front.quantum_out_edges->get()) { - if (pair.second == q_out) { - qubs.push_back(Qubit(pair.first)); - break; - } - } - } - // if generate_interaction_frontier called with slice_frontier_ no ops with - // more than two qubits will be present if generate_interaction_frontier - // called with frontier made in try_all_swaps or check_distributed_cx, - // Barrier Op possible. If barrier op in slice, don't add qubits in barrier - // interaction to Interactions - if (qubs.size() != 2) { - if (circ_.get_OpType_from_Vertex(vert) == OpType::Barrier) continue; - throw CircuitInvalidity( - "Vertex has " + std::to_string(qubs.size()) + " qubits, expected 2."); - } - - l_const_iterator_t node0_find = qmap.left.find(qubs[0]); - l_const_iterator_t node1_find = qmap.left.find(qubs[1]); - if (node0_find != qmap.left.end() && node1_find != qmap.left.end()) { - Node one = node0_find->second; - Node two = node1_find->second; - inter[one] = two; - inter[two] = one; - } - } - return inter; -} - -} // namespace tket diff --git a/tket/src/Routing/Swap_Analysis.cpp b/tket/src/Routing/Swap_Analysis.cpp deleted file mode 100644 index 1c15ac86b9..0000000000 --- a/tket/src/Routing/Swap_Analysis.cpp +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Architecture/Architecture.hpp" -#include "Circuit/CircPool.hpp" -#include "Routing/Routing.hpp" - -namespace tket { - -/* Routing Class Methods for picking optimal swaps */ - -/* Overloaded methods for generating distance vectors */ -// Distance vectors comprise of information pertaining to the architectural -// distance between qubits immediately interacting - -// Generates distance vector from input interaction vector -std::vector Routing::generate_distance_vector( - const Interactions &inter) const { - const unsigned n = current_arc_.get_diameter(); - // const unsigned n = active_distance_matrix.maxCoeff(); - if (n < 1) { - throw ArchitectureInvalidity("Architecture has diameter 0."); - } - std::vector dv(n - 1); - for (auto [n1, n2] : inter) { - unsigned dist = current_arc_.get_distance(n1, n2); - if (dist > 1) { - ++dv[n - dist]; - } - } - return dv; -} - -// Returns the distance between n1 and p1 and the distance between n2 and p2, -// distance ordered (greatest first) -const std::pair Routing::pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const { - unsigned curr_dist1 = current_arc_.get_distance(n1, p1); - unsigned curr_dist2 = current_arc_.get_distance(n2, p2); - return (curr_dist1 > curr_dist2) ? std::make_pair(curr_dist1, curr_dist2) - : std::make_pair(curr_dist2, curr_dist1); -} - -// Determines if a proposed swap brings interacting qubits closer, improving -// board state. -bool Routing::swap_decreases( - const Swap &nodes, const Interactions &inte) const { - Node node1 = nodes.first; - Node pair1 = inte.at(node1); - Node node2 = nodes.second; - Node pair2 = inte.at(node2); - - if (pair1 == node2 || (node1 == pair1 && node2 == pair2)) { - return false; - } - const std::pair &curr_dists = - pair_dists(node1, pair1, node2, pair2); - const std::pair &news_dists = - pair_dists(node2, pair1, node1, pair2); - - return news_dists < curr_dists; -} - -// Given swap and distance vector, updates distance vector to reflect increment -// change due to swaps nodes -void Routing::increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const { - const unsigned n = current_arc_.get_diameter(); - const unsigned dis_index = - n - current_arc_.get_distance(pair.first, pair.second); - if (dis_index < new_dist_vector.size()) { - new_dist_vector[dis_index] += increment; - } -} - -/* Overloaded method for updating temporary distance vectors due to proposed - * swaps */ -// Updates distance vector from proposed swap using global first slice -// interaction vector solve furthest only at this point ... 
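To make the swap heuristic above concrete: pair_dists orders the two relevant distances largest-first, and swap_decreases accepts a proposed swap exactly when that ordered pair shrinks lexicographically. A standalone toy version on a 4-node line, with plain ints in place of tket::Node and without the early exits for trivial cases that the real method performs:

#include <cstdlib>
#include <map>
#include <utility>

// Distances on the 4-node line 0-1-2-3 stand in for Architecture::get_distance.
static unsigned line_distance(int a, int b) {
  return static_cast<unsigned>(std::abs(a - b));
}

// Largest-first ordering, as in pair_dists.
static std::pair<unsigned, unsigned> ordered_dists(int n1, int p1, int n2, int p2) {
  const unsigned d1 = line_distance(n1, p1);
  const unsigned d2 = line_distance(n2, p2);
  return d1 > d2 ? std::make_pair(d1, d2) : std::make_pair(d2, d1);
}

int main() {
  // Qubits on nodes 0 and 3 interact; nodes 1 and 2 are idle.
  std::map<int, int> interaction{{0, 3}, {1, 1}, {2, 2}, {3, 0}};
  const int node1 = 0, node2 = 1;           // proposed swap of the states on nodes 0 and 1
  const int pair1 = interaction.at(node1);  // 3
  const int pair2 = interaction.at(node2);  // 1 (idle)
  const auto current = ordered_dists(node1, pair1, node2, pair2);   // (3, 0)
  const auto proposed = ordered_dists(node2, pair1, node1, pair2);  // (2, 1)
  const bool decreases = proposed < current;  // lexicographic test, as in swap_decreases
  return decreases ? 0 : 1;                   // decreases == true here
}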
- -// Updates distance vector from presented interaction vector -graphs::dist_vec Routing::update_distance_vector( - const Swap &nodes, std::vector new_dist_vector, - const Interactions &inte) const { - increment_distance(new_dist_vector, {nodes.first, inte.at(nodes.first)}, -2); - increment_distance( - new_dist_vector, {nodes.second, inte.at(nodes.second)}, -2); - increment_distance(new_dist_vector, {nodes.second, inte.at(nodes.first)}, 2); - increment_distance(new_dist_vector, {nodes.first, inte.at(nodes.second)}, 2); - return new_dist_vector; -} - -// Updates qmap to reflect performed swap -void Routing::update_qmap(qubit_bimap_t &map, const Swap &swap) { - const Qubit qb1 = map.right.at(swap.first); - const Qubit qb2 = map.right.at(swap.second); - map.right.erase(swap.first); - map.right.erase(swap.second); - map.left.insert({qb1, swap.second}); - map.left.insert({qb2, swap.first}); -} - -std::vector Routing::candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const { - std::vector potential_swaps; - for (auto [node, adjacent_node] : trial_edges) { - if (inte.at(node) != node || inte.at(adjacent_node) != adjacent_node) { - Swap proposed = {node, adjacent_node}; - if (swap_decreases(proposed, inte)) { - potential_swaps.push_back(proposed); - } - } - } - return potential_swaps; -} - -// Move heuristic in try_all_swaps loop outside, for testing help and easy -// changing? -std::vector Routing::cowtan_et_al_heuristic( - std::vector &candidate_swaps, - const std::vector &base_dists, - const Interactions &interac) const { - const Swap winner = candidate_swaps.back(); - candidate_swaps.pop_back(); - std::vector winner_distances = - update_distance_vector(winner, base_dists, interac); - std::vector smaller_set; - smaller_set.push_back(winner); - for (const Swap &proposed_swap : candidate_swaps) { - const std::vector proposed_distances = - update_distance_vector(proposed_swap, base_dists, interac); - const int comp = - tri_lexicographical_comparison(proposed_distances, winner_distances); - if (comp == -1) { - smaller_set.push_back(proposed_swap); - } else if (comp == 1) { - smaller_set = {proposed_swap}; - winner_distances = proposed_distances; - } - } - return smaller_set; -} - -SwapResults Routing::try_all_swaps(const std::vector - &trial_edges) { // don't need to change - std::vector potential_swaps = candidate_swaps(trial_edges, interaction); - - if (potential_swaps.empty()) return {false, {Node(0), Node(0)}}; - - RoutingFrontier high_sf = slice_frontier_; - - for (unsigned i = 0; i < config_.depth_limit && !high_sf.slice->empty() && - potential_swaps.size() > 1; - i++) { - Interactions interac = - (i == 0) ? interaction : generate_interaction_frontier(high_sf); - std::vector base_dists = - (i == 0) ? 
dist_vector : generate_distance_vector(interac); - - potential_swaps = - cowtan_et_al_heuristic(potential_swaps, base_dists, interac); - - high_sf.next_slicefrontier(); - } - - return {1, potential_swaps.back()}; -} - -std::vector Routing::path_to_swaps(const std::vector &path) { - const unsigned len = path.size(); - std::vector output_swaps; - if (len > 2) { - unsigned halfway = len / 2; - for (unsigned i = 0; (i < halfway) || ((halfway + 2 + i) < len); i++) { - if (i < halfway) { - Swap sw1 = {path[i], path[i + 1]}; - output_swaps.push_back(sw1); - } - if ((halfway + 2 + i) < len) { - Swap sw2 = {path[len - i - 2], path[len - i - 1]}; - output_swaps.push_back(sw2); - } - } - } - return output_swaps; -} - -// If heuristic can't settle on a suitable single swap or pair of swaps, find a -// path between the two interacting qubits at greatest distance and swap along -// it. -bool Routing::solve_furthest() { - bool success = false; - std::optional max_node; - unsigned max_dist = 0; - for (auto [q1, q2] : interaction) { - unsigned dist = current_arc_.get_distance(q1, q2); - if (dist > max_dist) { - max_dist = dist; - max_node = q1; - } - } - if (!max_node.has_value()) { - throw ArchitectureInvalidity("Architecture is disconnected"); - } - Node root = *max_node; - if (max_dist > 1) { - Node target = interaction.at(root); - const std::vector path = current_arc_.get_path(root, target); - const std::vector swaps_to_perform = path_to_swaps(path); - for (const Swap &swap : swaps_to_perform) { - success = true; - add_swap(swap); - } - } - return success; -} - -void Routing::update_central_nodes( - const Swap &nodes, const Interactions &interac, - distributed_cx_info &candidate_distributed_cx) { - if (candidate_distributed_cx.first.first) { - // TODO: check that there isnt a better way than get_path to do this - std::vector path = - current_arc_.get_path(nodes.first, interac.at(nodes.first)); - candidate_distributed_cx.first.second = path[1]; - if (interac.at(path[1]) != path[1]) { - candidate_distributed_cx.first.first = false; - } - } - if (candidate_distributed_cx.second.first) { - // TODO: this uses solve furthest -> maybe a better way just - // using the distance matrix alone for a speed up? - std::vector path = - current_arc_.get_path(nodes.second, interac.at(nodes.second)); - candidate_distributed_cx.second.second = path[1]; - if (interac.at(path[1]) != path[1]) { - candidate_distributed_cx.second.first = false; - } - } -} - -// Difference in distance between interacting qubits between nodes in SWAP can -// only differ by 1 Compares the difference in distance between all given -// interactions, scales them dependent on how timesteps to interaction, and -// returns whether a distributed cx is desired. 
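The solve_furthest fallback above picks the interacting pair at greatest distance and swaps along a shortest path between them; path_to_swaps pairs those swaps up from both ends of the path so the two tokens meet in the middle. A toy trace of that pairing, with ints standing in for tket::Node:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

using Swap = std::pair<int, int>;

// Same pairing as Routing::path_to_swaps above, in standalone form.
static std::vector<Swap> path_to_swaps(const std::vector<int>& path) {
  std::vector<Swap> swaps;
  const std::size_t len = path.size();
  if (len <= 2) return swaps;  // endpoints already adjacent: nothing to do
  const std::size_t halfway = len / 2;
  for (std::size_t i = 0; (i < halfway) || (halfway + 2 + i < len); ++i) {
    if (i < halfway) swaps.push_back({path[i], path[i + 1]});
    if (halfway + 2 + i < len) swaps.push_back({path[len - i - 2], path[len - i - 1]});
  }
  return swaps;
}

int main() {
  // Path 0-1-2-3-4: expect swaps (0,1), (3,4), (1,2); afterwards the tokens that
  // started on nodes 0 and 4 sit on the adjacent nodes 2 and 3.
  const auto swaps = path_to_swaps({0, 1, 2, 3, 4});
  assert((swaps == std::vector<Swap>{{0, 1}, {3, 4}, {1, 2}}));
  return 0;
}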
-void Routing::compare_distributed_cx_distances( - distributed_cx_info &candidate_distributed_cx, - const std::pair, std::vector> &inter_node) { - std::pair distance_check = {0, 0}; - for (unsigned i = 1; i < inter_node.first.size(); i++) { - distance_check.first += pow(i, config_.distrib_exponent) * - (int(current_arc_.get_distance( - inter_node.second[0], inter_node.first[i])) - - int(current_arc_.get_distance( - inter_node.first[0], inter_node.first[i]))); - } - for (unsigned i = 1; i < inter_node.second.size(); i++) { - distance_check.second += pow(i, config_.distrib_exponent) * - (int(current_arc_.get_distance( - inter_node.first[0], inter_node.second[i])) - - int(current_arc_.get_distance( - inter_node.second[0], inter_node.second[i]))); - } - if (distance_check.first < 0) { - candidate_distributed_cx.first.first = false; - } - if (distance_check.second < 0) { - candidate_distributed_cx.second.first = false; - } -} - -bool check_vertex_is_CX(const Circuit &circ_, const Vertex &v) { - OpType ot = circ_.get_OpType_from_Vertex(v); - if (ot != OpType::CX) { - if (ot == OpType::Conditional) { - const Conditional &b = - static_cast(*circ_.get_Op_ptr_from_Vertex(v)); - if (b.get_op()->get_type() != OpType::CX) { - return false; - } - } else { - return false; - } - } - return true; -} -// Method is supplied with a pair of nods with the intention of being swapped. -// Before this SWAP gate is added, this method considers whether a distributed -// CX gate between interacting qubits distance 2 away from eachother is a better -// option The returned bool pair instructs perform_action whether to add a -// distributed CX gate between nodes.first and its partner node and nodes.second -// and its partner node respectively -distributed_cx_info Routing::check_distributed_cx(const Swap &nodes) { - // 1) Determine which nodes in SWAP gate could complete their CX with a - // distributed CX gate instead - distributed_cx_info candidate_distributed_cx = { - {current_arc_.get_distance(nodes.first, interaction[nodes.first]) == 2, - Node(0)}, - {current_arc_.get_distance(nodes.second, interaction[nodes.second]) == 2, - Node(0)}}; - // 1 pt2) Is the vertex a CX gate or Conditioned CX gate? - auto cx_check = [&](bool candidate, const Qubit &qb) { - if (candidate) - return check_vertex_is_CX( - circ_, - circ_.target(slice_frontier_.quantum_in_edges->find(qb)->second)); - return true; - }; - if (!cx_check( - candidate_distributed_cx.first.first, qmap.right.at(nodes.first))) - return {{false, Node(0)}, {false, Node(0)}}; - if (!cx_check( - candidate_distributed_cx.second.first, qmap.right.at(nodes.second))) - return {{false, Node(0)}, {false, Node(0)}}; - - if (candidate_distributed_cx.first.first || - candidate_distributed_cx.second.first) { - // 2) Find number of next interactions for node in SWAP equivalent to - // config, or reached within depth limit - std::pair, std::vector> inter_node = { - {nodes.first}, {nodes.second}}; - std::pair ni_limit = {0, 0}; - - RoutingFrontier high_sf = slice_frontier_; - - for (unsigned i = 0; i < config_.distrib_limit && !high_sf.slice->empty() && - (ni_limit.first < config_.interactions_limit || - ni_limit.second < config_.interactions_limit); - i++) { - // Find interaction frontier for current slice and find the interacting - // pairs of nodes for incident SWAP gate. - Interactions interac = - (i == 0) ? 
interaction : generate_interaction_frontier(high_sf); - - if (nodes.first != interac[nodes.first] && - ni_limit.first < config_.interactions_limit) { - inter_node.first.push_back(interac[nodes.first]); - ni_limit.first++; - } - if (nodes.second != interac[nodes.second] && - ni_limit.second < config_.interactions_limit) { - inter_node.second.push_back(interac[nodes.second]); - ni_limit.second++; - } - high_sf.next_slicefrontier(); - } - if (ni_limit.first > 0 && ni_limit.second > 0) { - // 3) Compare difference in distances between interacting qubits given the - // permutation of qubits from added SWAP gate, or not. - compare_distributed_cx_distances(candidate_distributed_cx, inter_node); - if (candidate_distributed_cx.first.first || - candidate_distributed_cx.second.first) { - // 4) If desirable, find the central node of the bridge. - update_central_nodes(nodes, interaction, candidate_distributed_cx); - return candidate_distributed_cx; - } - } - } - return {{false, Node(0)}, {false, Node(0)}}; -} - -// Give a node with a control qubit on it, finds its respective target node and -// node between them, and replaces the CX gate between the control and target -// with a distributed CX -void Routing::add_distributed_cx( - const Node &cx_node_0, const Node &cx_node_1, const Node ¢ral_node) { - // Find interacting node for starting_node, find node between them. Also swap - // control and target node if necessary. - - if (current_arc_.get_distance(cx_node_0, cx_node_1) != 2) { - throw BridgeInvalid("Bridge Nodes are not distance 2 apart."); - } - if (current_arc_.get_distance(cx_node_0, central_node) != 1 || - current_arc_.get_distance(cx_node_1, central_node) != 1) { - throw BridgeInvalid( - "Central BRIDGE node not adjacent to Control and Target " - "nodes."); - } - - route_stats.bridge_count++; - Edge edge_0 = - slice_frontier_.quantum_in_edges->find(qmap.right.at(cx_node_0))->second; - Edge edge_1 = - slice_frontier_.quantum_in_edges->find(qmap.right.at(cx_node_1))->second; - - // Assign control and target nodes from cx_node_0 and cx_node_1 - // Depends on the port ordering of the cx_node_0 and cx_node_1 corresponding - // edges attached to the CX vertex - Node control_node, target_node; - if (circ_.get_ports(edge_1).second < circ_.get_ports(edge_0).second) { - control_node = cx_node_1; - target_node = cx_node_0; - } else { - control_node = cx_node_0; - target_node = cx_node_1; - } - - // Find qubits associated to each node - const Qubit control_qb = qmap.right.at(control_node); - const Qubit central_qb = qmap.right.at(central_node); - const Qubit target_qb = qmap.right.at(target_node); - - // Initialize variables appropriate for substituting Conditionals with CX - // gates to Conditionals with BRIDGE gates. 
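For orientation, the weighting inside compare_distributed_cx_distances above reduces to the score sketched below, with illustrative names and a plain all-pairs distance matrix in place of Architecture::get_distance. Later interactions are weighted by i^distrib_exponent, and a negative total means the proposed SWAP would serve the upcoming interactions better than leaving the qubit in place, so the BRIDGE candidate is discarded.

#include <cmath>
#include <cstddef>
#include <vector>

// `here` is the node a qubit currently occupies, `there` is where the proposed
// SWAP would move it, and partners[i] is the node of its (i+1)-th upcoming
// interaction.  dist is a precomputed all-pairs distance matrix.
static double bridge_vs_swap_score(
    std::size_t here, std::size_t there, const std::vector<std::size_t>& partners,
    const std::vector<std::vector<int>>& dist, double distrib_exponent) {
  double score = 0.0;
  for (std::size_t i = 1; i <= partners.size(); ++i) {
    score += std::pow(static_cast<double>(i), distrib_exponent) *
             (dist[there][partners[i - 1]] - dist[here][partners[i - 1]]);
  }
  return score;  // keep the distributed-CX candidate only if score >= 0
}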
- Op_ptr new_bridge_ptr; - EdgeVec b_in_edges = {}; - OpType gate_op; - std::vector> classical_edge_info = {}; - - Vertex to_be_replaced = slice_frontier_.circ.target( - slice_frontier_.quantum_in_edges->find(control_qb)->second); - // If OpType is a Conditional{CX}, replace with Conditional{BRIDGE} instead - if (circ_.get_OpType_from_Vertex(to_be_replaced) == OpType::Conditional) { - Op_ptr pt = circ_.get_Op_ptr_from_Vertex(to_be_replaced); - const Conditional &b = static_cast( - *circ_.get_Op_ptr_from_Vertex(to_be_replaced)); - gate_op = b.get_op()->get_type(); - new_bridge_ptr = std::make_shared( - get_op_ptr(OpType::BRIDGE, std::vector(), 3), b.get_width(), - b.get_value()); - // Also collect any classical in edges - b_in_edges = circ_.get_in_edges_of_type(to_be_replaced, EdgeType::Boolean); - for (Edge e : b_in_edges) { - classical_edge_info.push_back( - {circ_.source(e), circ_.get_source_port(e), - circ_.get_target_port(e)}); - } - } else { // else make normal bridge - new_bridge_ptr = get_op_ptr(OpType::BRIDGE); - gate_op = circ_.get_OpType_from_Vertex(to_be_replaced); - } - - if (gate_op != OpType::CX) { - throw BridgeInvalid( - "OpType::BRIDGE being substituted for a vertex that isn't " - "OpType::CX. Please rebase two-qubit primitive to CX gate."); - } - // Collect all required Quantum edge information - - Edge control_in_edge = - slice_frontier_.quantum_in_edges->find(control_qb)->second; - Edge control_out_edge = - slice_frontier_.quantum_out_edges->find(control_qb)->second; - Edge central_edge = - slice_frontier_.quantum_in_edges->find(central_qb)->second; - Edge target_in_edge = - slice_frontier_.quantum_in_edges->find(target_qb)->second; - Edge target_out_edge = - slice_frontier_.quantum_out_edges->find(target_qb)->second; - - VertPort control_pred = { - circ_.source(control_in_edge), circ_.get_source_port(control_in_edge)}; - VertPort central_pred = { - circ_.source(central_edge), circ_.get_source_port(central_edge)}; - VertPort target_pred = { - circ_.source(target_in_edge), circ_.get_source_port(target_in_edge)}; - - VertPort control_succ = { - circ_.target(control_out_edge), circ_.get_target_port(control_out_edge)}; - VertPort central_succ = { - circ_.target(central_edge), circ_.get_target_port(central_edge)}; - VertPort target_succ = { - circ_.target(target_out_edge), circ_.get_target_port(target_out_edge)}; - - // remove old vertex, add new vertex - circ_.remove_vertex( - to_be_replaced, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); - Vertex bridge_vert = circ_.add_vertex(new_bridge_ptr); - // add Boolean edges - for (std::tuple vpp : classical_edge_info) { - circ_.add_edge( - {std::get<0>(vpp), std::get<1>(vpp)}, {bridge_vert, std::get<2>(vpp)}, - EdgeType::Boolean); - } - - unsigned num_classicals = classical_edge_info.size(); - // add control qubit in edge - Edge control_in = circ_.add_edge( - control_pred, {bridge_vert, num_classicals}, EdgeType::Quantum); - // add control qubit out edge - Edge control_out = circ_.add_edge( - {bridge_vert, num_classicals}, control_succ, EdgeType::Quantum); - // add central qubit in edge - Edge central_in = circ_.add_edge( - central_pred, {bridge_vert, num_classicals + 1}, EdgeType::Quantum); - // add central qubit out edge - Edge central_out = circ_.add_edge( - {bridge_vert, num_classicals + 1}, central_succ, EdgeType::Quantum); - // add target qubit in edge - Edge target_in = circ_.add_edge( - target_pred, {bridge_vert, num_classicals + 2}, EdgeType::Quantum); - // add target qubit out edge - Edge target_out = 
circ_.add_edge( - {bridge_vert, num_classicals + 2}, target_succ, EdgeType::Quantum); - - // Remove central_edge which is now going through BRIDGE vertex - circ_.remove_edge(central_edge); - - unit_frontier_t::iterator control_qb_in_it = - slice_frontier_.quantum_in_edges->find(control_qb); - unit_frontier_t::iterator central_qb_in_it = - slice_frontier_.quantum_in_edges->find(central_qb); - unit_frontier_t::iterator target_qb_in_it = - slice_frontier_.quantum_in_edges->find(target_qb); - - slice_frontier_.quantum_in_edges->replace( - control_qb_in_it, {control_qb, control_in}); - slice_frontier_.quantum_in_edges->replace( - central_qb_in_it, {central_qb, central_in}); - slice_frontier_.quantum_in_edges->replace( - target_qb_in_it, {target_qb, target_in}); - - // Update slice frontier out edges - unit_frontier_t::iterator control_qb_out_it = - slice_frontier_.quantum_out_edges->find(control_qb); - unit_frontier_t::iterator central_qb_out_it = - slice_frontier_.quantum_out_edges->find(central_qb); - unit_frontier_t::iterator target_qb_out_it = - slice_frontier_.quantum_out_edges->find(target_qb); - - slice_frontier_.quantum_out_edges->replace( - control_qb_out_it, {control_qb, control_out}); - slice_frontier_.quantum_out_edges->replace( - central_qb_out_it, {central_qb, central_out}); - slice_frontier_.quantum_out_edges->replace( - target_qb_out_it, {target_qb, target_out}); - - // Remove CX vertex from Slice (i.e. VertexVec) in slice_frontier- - slice_frontier_.slice->erase( - std::remove( - slice_frontier_.slice->begin(), slice_frontier_.slice->end(), - to_be_replaced), - slice_frontier_.slice->end()); - slice_frontier_.slice->push_back(bridge_vert); -} - -// Suitable swap found, amend all global constructs -void Routing::add_swap(const Swap &nodes) { - route_stats.swap_count++; - const Qubit qb1 = qmap.right.at(nodes.first); - const Qubit qb2 = qmap.right.at(nodes.second); - - update_qmap(qmap, nodes); - - // --- --X--\ /-- - // = | X - // --- --X--/ \-- - // So we insert a SWAP gate and perform the wire swap by changing the output - // ports - - // find edges using qubits - EdgeVec preds = { - slice_frontier_.quantum_in_edges->find(qb1)->second, - slice_frontier_.quantum_in_edges->find(qb2)->second}; - - Vertex swap_v = circ_.add_vertex(OpType::SWAP); - circ_.rewire(swap_v, preds, {EdgeType::Quantum, EdgeType::Quantum}); - EdgeVec swap_outs = circ_.get_all_out_edges(swap_v); - - circ_.dag[swap_outs[0]].ports.first = 1; - circ_.dag[swap_outs[1]].ports.first = 0; - unit_frontier_t::iterator qb1_in_it = - slice_frontier_.quantum_in_edges->find(qb1); - slice_frontier_.quantum_in_edges->replace(qb1_in_it, {qb1, swap_outs[0]}); - unit_frontier_t::iterator qb2_in_it = - slice_frontier_.quantum_in_edges->find(qb2); - slice_frontier_.quantum_in_edges->replace(qb2_in_it, {qb2, swap_outs[1]}); - unit_frontier_t::iterator qb1_out_it = - slice_frontier_.quantum_out_edges->find(qb1); - unit_frontier_t::iterator qb2_out_it = - slice_frontier_.quantum_out_edges->find(qb2); - if (preds[0] == qb1_out_it->second) { - slice_frontier_.quantum_out_edges->replace(qb1_out_it, {qb1, swap_outs[0]}); - } else if (preds[1] == qb2_out_it->second) { - slice_frontier_.quantum_out_edges->replace(qb2_out_it, {qb2, swap_outs[1]}); - } -} - -void Routing::perform_action(const Swap &nodes) { - distributed_cx_info cdcx = check_distributed_cx(nodes); - if (cdcx.first - .first) { // in current heuristic, both nodes in SWAP being distance - // two from target, and closer to next interaction if not - // permuted is 
exceptionally rare (never so far...) - Node temp = nodes.first; - add_distributed_cx(temp, interaction[nodes.first], cdcx.first.second); - } else if (cdcx.second.first) { - Node temp = nodes.second; - add_distributed_cx(temp, interaction[nodes.second], cdcx.second.second); - } else { - add_swap(nodes); - } -} -} // namespace tket diff --git a/tket/src/Routing/include/Routing/Routing.hpp b/tket/src/Routing/include/Routing/Routing.hpp deleted file mode 100644 index beade56d99..0000000000 --- a/tket/src/Routing/include/Routing/Routing.hpp +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include - -#include "Architecture/Architecture.hpp" -#include "Circuit/Circuit.hpp" -#include "Placement.hpp" -#include "Utils/BiMapHeaders.hpp" -#include "Utils/Json.hpp" - -namespace tket { - -// 2 (adjacent) nodes proposed to have their concurrent qubit states swapped -typedef std::pair Swap; -// node i is interacting with element (j) at i, if i==j not interacting -typedef std::map Interactions; -typedef std::vector qubit_map_vector_t; -typedef std::pair, std::pair> - distributed_cx_info; -// TODO remove -// qubit_map_vector_t map2vec(qubit_bimap_t map, unsigned total); -struct SwapResults { // results of try_all_swaps algorithm - bool success; - Swap swap; -}; - -/* Error Handling for Routing Circuits */ -class ArchitectureMismatch : public std::logic_error { - public: - ArchitectureMismatch(unsigned circ_no, unsigned arch_no) - : std::logic_error( - std::to_string(circ_no) + " " + std::to_string(arch_no)) { - tket_log()->error( - "Incorrect number of nodes in the architecture. " - "Qubits in circuit: {}, nodes in architecture: {}", - circ_no, arch_no); - } -}; - -class QMapRange : public std::logic_error { - public: - explicit QMapRange(const std::string &message) : std::logic_error(message) {} -}; - -class NodesRange : public std::logic_error { - public: - NodesRange(int nodes, int qubit) - : std::logic_error(std::to_string(nodes) + " " + std::to_string(qubit)) { - tket_log()->error( - "Qubit indexing larger than number of available qubits." - "Available Qubits: {}, Qubit Index: {}", - nodes, qubit); - } -}; - -class ArchitectureFull : public std::logic_error { - public: - ArchitectureFull() - : std::logic_error( - "No suitable node found in findBestNode => all nodes already " - "used") {} -}; - -class NodeAlreadyActive : public std::logic_error { - public: - explicit NodeAlreadyActive(int node) - : std::logic_error(std::to_string(node)) { - tket_log()->error("Node {} already active.", node); - } -}; - -class NodeInactive : public std::logic_error { - public: - explicit NodeInactive(int node) : std::logic_error(std::to_string(node)) { - tket_log()->error("Node {} inactive.", node); - } -}; - -class RoutingFailure : public std::logic_error { - public: - RoutingFailure() - : std::logic_error( - "Routing failed to complete. 
Note: Check your architecture " - "is connected.") {} -}; - -class BridgeInvalid : public std::logic_error { - public: - explicit BridgeInvalid(const std::string &message) - : std::logic_error(message) {} -}; - -class BridgePathIncorrect : public std::logic_error { - public: - explicit BridgePathIncorrect(int path_size) - : std::logic_error(std::to_string(path_size)) { - tket_log()->error("Path found has size {} which is invalid.", path_size); - } -}; - -// structure of configuration parameters for routing -struct RoutingConfig { - // circuit look ahead limit for SWAP picking - unsigned depth_limit; - // circuit look ahead limit for Distributed CX gate checking - unsigned distrib_limit; - // number of interactions considered in Distributed CX gate checking - unsigned interactions_limit; - // Whether to use a Distributed CX gate instead of a SWAP and a CX is - // determined by comparing the distance between some interacting pairs of - // qubits with and without the permutation. Changing distrib_exponent changes - // how much later interactions are considered. distrib_exponent < 0 => less - // effect from later interactoins, distrib_exponent > 0 => greater effect, - // distrib_exponent = 0 => no effect - double distrib_exponent; - // Constructors - RoutingConfig( - unsigned _depth_limit, unsigned _distrib_limit, - unsigned _interactions_limit, const double &_distrib_exponent) - : depth_limit(_depth_limit), - distrib_limit(_distrib_limit), - interactions_limit(_interactions_limit), - distrib_exponent(_distrib_exponent) {} - - RoutingConfig() : RoutingConfig(50, 75, 10, 0) {} - - bool operator==(const RoutingConfig &other) const; -}; - -JSON_DECL(RoutingConfig) - -// stores and tracks the points of the circuit up to which has been solved -struct RoutingFrontier { - // set of 2qb vertices which need to be solved for - std::shared_ptr slice; - // Quantum Edges coming in to vertices in slice, indexed by qubit - std::shared_ptr quantum_in_edges; - // Quantum Edges leaving vertices in slice, indexed by qubit - std::shared_ptr quantum_out_edges; - // Boolean edges coming in to vertices in slice. Guarantees that all edges - // into every vertex in slice is represented in next_cut - std::shared_ptr classical_in_edges; - - // reference to circuit that it acts on - const Circuit ˆ - - explicit RoutingFrontier(const Circuit &_circ); - // initialise at front of circuit - void init(); - // move to next slice - void next_slicefrontier(); -}; - -// remove node from architecture as long as subgraph remains connected. Nodes -// not in map from architecture if possible -void remove_unmapped_nodes( - Architecture &arc, qubit_bimap_t &map, Circuit &circ); - -bool subgraph_remove_if_connected( - Architecture &arc, const Architecture &subarc, const Node &node); - -// remove nodes not in map from architecture if possible -void remove_unmapped_nodes( - Architecture &arc, qubit_bimap_t &map, Circuit &circ); - -Circuit autoroute(const Circuit &circ, const Architecture &arc); - -class RoutingTester; -/* Routing class, contains solve method for transforming a circuit such that -all it's multi-qubit interactions are adjacent for some specificed architecture. 
-*/ -class Routing { - public: - struct Stats { - unsigned n_try_all_swaps; - unsigned n_solve_furthest; - unsigned swap_count; - unsigned bridge_count; - Stats() - : n_try_all_swaps(0), - n_solve_furthest(0), - swap_count(0), - bridge_count(0) {} - }; - - /* Class Constructor */ - Routing(const Circuit &_circ, const Architecture &_arc); - /* Solve Method */ - // solve using default mapping (line_placement) and default config - // Default RoutingConfig provides a set of parameters that use all available - // features of Routing, but are not specialised for a certain architecture: - // depth_limit = 50 - // distrib_limit = 75 - // interactions_limit = 10 - // distrib_exponent = 0 - // This configuration is used for any solve method that does not have config - // specified. - - // solve with default mapping and provided config - std::pair solve(const RoutingConfig &_config = {}); - qubit_bimap_t remap(const qubit_bimap_t &init); - void organise_registers_and_maps(); - - // TODO:: Make relevant and useful again - qubit_mapping_t return_final_map() const; - qubit_mapping_t return_initial_map() const; - /* Getters*/ - std::vector get_active_nodes() const; - - RoutingFrontier get_slicefrontier() const { return slice_frontier_; } - Stats get_stats() const { return route_stats; } - - private: - // Circuit being solved - Circuit circ_; - // RoutingFrontier tracking the position whcih has been solved up to - RoutingFrontier slice_frontier_; - // Configuration settings for routing - RoutingConfig config_; - - // Architecture being solved for and the original architecture given - Architecture current_arc_; - Architecture original_arc_; - - // Which qubits are interacting and total distance of a board state for - // interacting qubits - Interactions interaction; - // Total distance of a board state for interacting qubits - graphs::dist_vec dist_vector; - - Stats route_stats; - - boundary_t original_boundary; - - // Various qubit mappings. Qmap is used as the algorithim proceeds, the - // initial map is assigned from placement and the final map displays where - // qubits end up while routed. Relative mapping is what the final mapping - // would be if initial mapping was sequential. 
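A minimal usage sketch of the interface declared above, as it stood before this deletion. It assumes the rest of tket at this commit; the helper name and the choice of parameters are illustrative only.

#include <utility>

#include "Architecture/Architecture.hpp"
#include "Circuit/Circuit.hpp"
#include "Routing/Routing.hpp"

// Route `circ` onto `arc` with a deeper SWAP lookahead than the default
// RoutingConfig(50, 75, 10, 0); returns the routed circuit and whether it was
// modified, as Routing::solve does.
std::pair<tket::Circuit, bool> route_with_deeper_lookahead(
    const tket::Circuit& circ, const tket::Architecture& arc) {
  tket::RoutingConfig config(100, 75, 10, 0.0);  // depth_limit raised to 100
  tket::Routing router(circ, arc);
  return router.solve(config);
}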
- qubit_bimap_t qmap, init_map, final_map; - - /* Swap_Analysis.cpp methods */ - // Methods used in determining the best Swap for a given board state and - // implementing it - void increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const; - graphs::dist_vec generate_distance_vector(const Interactions &inter) const; - graphs::dist_vec update_distance_vector( - const Swap &nodes, graphs::dist_vec new_dist_vector, - const Interactions &inte) const; - const std::pair pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const; - bool swap_decreases(const Swap &nodes, const Interactions &inte) const; - std::vector candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const; - std::vector cowtan_et_al_heuristic( - std::vector &candidate_swaps, const graphs::dist_vec &base_dists, - const Interactions &interac) const; - SwapResults try_all_swaps( - const std::vector &trial_edges); - - static void update_qmap(qubit_bimap_t &map, const Swap &swap); - void update_central_nodes( - const Swap &nodes, const Interactions &interac, - distributed_cx_info &candidate_distributed_cx); - - void compare_distributed_cx_distances( - distributed_cx_info &candidate_distributed_cx, - const std::pair, std::vector> &inter_node); - distributed_cx_info check_distributed_cx(const Swap &nodes); - void add_distributed_cx( - const Node &control_node, const Node &target_node, - const Node ¢ral_node); - void add_swap(const Swap &nodes); - void perform_action(const Swap &nodes); - - // Dijkstras algorithm methods - static std::vector path_to_swaps(const std::vector &path); - - bool solve_furthest(); - - /* Slice_Maniupation.cpp methods */ - // find nodes for qubits, activating if necessary - std::vector nodes_from_qubits(const qubit_vector_t &qubs); - // Advances slice frontier past any two_qubit operations on adjacent nodes - bool advance_frontier(); - - bool circuit_modified() const; - - friend class RoutingTester; - // generate interaction vectors from slice_frontiers, qubits=true means return - // qubit interactions rather than node - Interactions generate_interaction_frontier( - const RoutingFrontier &slice_front); - /* Qubit_Placement.cpp methods */ - // Methods for producing a good intial qubit mapping to an architecture from - // given circuit - - // void print_qubitlines(QubitLineList &in); - - /* Board_Analysis.cpp routing methods */ - Node find_best_inactive_node( - const Node &target_node, const Architecture &arc) const; - void activate_node(const Node &node); - void reactivate_qubit(const Qubit &qb, const Qubit &target); -}; - -class RoutingTester { - private: - Routing *router; - - public: - explicit RoutingTester(Routing *_router) : router(_router) {} - - Interactions get_interaction(const RoutingFrontier &sf); - void set_qmap(qubit_bimap_t _qmap); - void next_sf(RoutingFrontier &sf); - Circuit *get_circ(); - void set_config(const RoutingConfig &_config); - // Wrappers of private methods for testing? 
- void increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const; - graphs::dist_vec generate_distance_vector(const Interactions &inter) const; - graphs::dist_vec update_distance_vector( - const Swap &nodes, graphs::dist_vec new_dist_vector, - const Interactions &inte) const; - const std::pair pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const; - bool swap_decreases(const Swap &nodes, const Interactions &inte) const; - std::vector candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const; - std::vector cowtan_et_al_heuristic( - std::vector &candidate_swaps, const graphs::dist_vec &base_dists, - const Interactions &interac) const; - void update_qmap(qubit_bimap_t &map, const Swap &swap); - std::vector path_to_swaps(const std::vector &path) const; - qubit_bimap_t set_default_initial_map( - std::optional canonical_node_order = std::nullopt); - void initialise_slicefrontier(); - void add_distributed_cx( - const Node &control_node, const Node &target_node, - const Node ¢ral_node); - distributed_cx_info check_distributed_cx(const Swap &nodes); - void advance_frontier(); - void set_interaction(); -}; - -} // namespace tket diff --git a/tket/src/Simulation/BitOperations.cpp b/tket/src/Simulation/BitOperations.cpp index 02da5f69de..4ccb7fddf9 100644 --- a/tket/src/Simulation/BitOperations.cpp +++ b/tket/src/Simulation/BitOperations.cpp @@ -16,6 +16,8 @@ #include +#include "Utils/Assert.hpp" + namespace tket { namespace tket_sim { namespace internal { @@ -44,9 +46,7 @@ ExpansionData get_expansion_data( auto test_bit = next_bit; for (unsigned left_shift_arg = 0;; ++left_shift_arg) { if ((test_bit & forbidden_bits) == 0) { - if (test_bit == 0) { - throw std::runtime_error("Ran out of bits"); - } + TKET_ASSERT(test_bit != 0); // A free space has been found. push_back(result, next_bit, left_shift_arg); forbidden_bits |= test_bit; diff --git a/tket/src/Simulation/GateNode.cpp b/tket/src/Simulation/GateNode.cpp index fe40d96ef2..0e9cb2c455 100644 --- a/tket/src/Simulation/GateNode.cpp +++ b/tket/src/Simulation/GateNode.cpp @@ -225,9 +225,8 @@ static void set_lifted_triplets( const SimUInt free_bits_limit = get_matrix_size(full_number_of_qubits - qubits.size()); - if (free_bits_limit == 0) { - throw std::runtime_error("Too many bits"); - } + TKET_ASSERT(free_bits_limit != 0 || !"Too many bits"); + for (SimUInt free_bits = 0; free_bits < free_bits_limit; ++free_bits) { const SimUInt expanded_free_bits = get_expanded_bits(expansion_data, free_bits); diff --git a/tket/src/TokenSwapping/BestFullTsa.cpp b/tket/src/TokenSwapping/BestFullTsa.cpp new file mode 100644 index 0000000000..89966b8c98 --- /dev/null +++ b/tket/src/TokenSwapping/BestFullTsa.cpp @@ -0,0 +1,51 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
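On the Simulation hunks above, which replace runtime throws with TKET_ASSERT: the condition || !"message" spelling keeps a human-readable string inside the asserted expression without changing its truth value, because a string literal is a non-null pointer and !"..." is therefore false. A standalone illustration with plain assert (TKET_ASSERT itself is a tket macro):

#include <cassert>

int main() {
  unsigned free_bits_limit = 8;
  // Both forms assert the same condition; the string only serves as an embedded
  // diagnostic in the expression printed on failure.
  assert(free_bits_limit != 0 && "Too many bits");   // common idiom
  assert(free_bits_limit != 0 || !"Too many bits");  // form used with TKET_ASSERT above
  return 0;
}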
+ +#include "TokenSwapping/BestFullTsa.hpp" + +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/VertexMapResizing.hpp" + +namespace tket { + +using namespace tsa_internal; + +BestFullTsa::BestFullTsa() { m_name = "BestFullTsa"; } + +void BestFullTsa::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + RiverFlowPathFinder& path_finder) { + auto vm_copy = vertex_mapping; + + m_hybrid_tsa.append_partial_solution( + swaps, vm_copy, distances, neighbours, path_finder); + + // Still subject to experimentation, but this seems the best + m_swap_list_optimiser.optimise_pass_with_zero_travel(swaps); + m_swap_list_optimiser.optimise_pass_with_token_tracking(swaps); + m_swap_list_optimiser.optimise_pass_remove_empty_swaps(swaps, vertex_mapping); + m_swap_list_optimiser.full_optimise(swaps, vertex_mapping); + + VertexMapResizing map_resizing(neighbours); + std::set vertices_with_tokens_at_start; + for (const auto& entry : vertex_mapping) { + vertices_with_tokens_at_start.insert(entry.first); + } + m_table_optimiser.optimise( + vertices_with_tokens_at_start, map_resizing, swaps, + m_swap_list_optimiser); +} + +} // namespace tket diff --git a/tket/src/TokenSwapping/CMakeLists.txt b/tket/src/TokenSwapping/CMakeLists.txt new file mode 100644 index 0000000000..aa0d9f275e --- /dev/null +++ b/tket/src/TokenSwapping/CMakeLists.txt @@ -0,0 +1,66 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project(tket-${COMP}) + +if (NOT ${COMP} STREQUAL "TokenSwapping") + message(FATAL_ERROR "Unexpected component name.") +endif() + +add_library(tket-${COMP} + BestFullTsa.cpp + CyclesCandidateManager.cpp + CyclesGrowthManager.cpp + CyclesPartialTsa.cpp + CyclicShiftCostEstimate.cpp + DistancesInterface.cpp + DynamicTokenTracker.cpp + HybridTsa.cpp + NeighboursInterface.cpp + PartialTsaInterface.cpp + RiverFlowPathFinder.cpp + SwapListOptimiser.cpp + TrivialTSA.cpp + VectorListHybridSkeleton.cpp + TSAUtils/DistanceFunctions.cpp + TSAUtils/SwapFunctions.cpp + TSAUtils/VertexMappingFunctions.cpp + TSAUtils/VertexSwapResult.cpp + TableLookup/CanonicalRelabelling.cpp + TableLookup/ExactMappingLookup.cpp + TableLookup/FilteredSwapSequences.cpp + TableLookup/PartialMappingLookup.cpp + TableLookup/SwapConversion.cpp + TableLookup/SwapListSegmentOptimiser.cpp + TableLookup/SwapListTableOptimiser.cpp + TableLookup/SwapSequenceTable.cpp + TableLookup/VertexMapResizing.cpp + ) + +list(APPEND DEPS_${COMP} + Utils) + +foreach(DEP ${DEPS_${COMP}}) + target_include_directories( + tket-${COMP} PRIVATE ${TKET_${DEP}_INCLUDE_DIR}) + target_link_libraries( + tket-${COMP} PRIVATE tket-${DEP}) +endforeach() + +target_include_directories(tket-${COMP} + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${TKET_${COMP}_INCLUDE_DIR} + ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) + diff --git a/tket/src/TokenSwapping/CyclesCandidateManager.cpp b/tket/src/TokenSwapping/CyclesCandidateManager.cpp new file mode 100644 index 0000000000..fdf52a2286 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesCandidateManager.cpp @@ -0,0 +1,213 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "CyclesCandidateManager.hpp" + +#include +#include +#include + +#include "Utils/Assert.hpp" +#include "VertexSwapResult.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +size_t CyclesCandidateManager::fill_initial_cycle_ids(const Cycles& cycles) { + m_cycle_with_vertex_hash.clear(); + m_cycles_to_keep.clear(); + size_t cycle_length = 0; + for (auto id_opt = cycles.front_id(); id_opt; + id_opt = cycles.next(id_opt.value())) { + const auto& cycle = cycles.at(id_opt.value()); + const auto& vertices = cycle.vertices; + + if (cycle_length == 0) { + cycle_length = vertices.size(); + TKET_ASSERT(cycle_length >= 2); + } else { + TKET_ASSERT(cycle_length == vertices.size()); + } + TKET_ASSERT(cycle.decrease > 0); + + // We want 50*(decrease)/(num swaps) >= min_candidate_power_percentage. + // (We multiply by 50 because a swap can change L by 2, not 1). 
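The candidate filter just below in fill_initial_cycle_ids keeps a cycle only when its L-decrease per swap reaches the requested percentage of the maximum of 2 per swap. The same inequality in isolation (the real check is inlined; the free function here is illustrative only):

#include <cassert>
#include <cstddef>

// Mirrors 50 * decrease >= min_candidate_power_percentage * cycle_length, i.e.
// the decrease per swap (each swap changes L by at most 2) must reach the
// requested percentage.
static bool powerful_enough(unsigned decrease, std::size_t cycle_length,
                            unsigned min_power_pct) {
  return 50u * decrease >= min_power_pct * cycle_length;
}

int main() {
  assert(powerful_enough(2, 2, 30));   // a swap moving both tokens closer passes a 30% bar
  assert(!powerful_enough(1, 2, 30));  // a swap helping only one token is filtered out
  return 0;
}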
+ if (50 * static_cast(cycle.decrease) < + (m_options.min_candidate_power_percentage * cycle_length)) { + continue; + } + + CycleData cycle_data; + cycle_data.id = id_opt.value(); + cycle_data.first_vertex_index = 0; + for (size_t ii = 1; ii < vertices.size(); ++ii) { + if (vertices[ii] < vertices[cycle_data.first_vertex_index]) { + cycle_data.first_vertex_index = ii; + } + } + size_t hash = static_cast(cycle.decrease); + for (size_t ii = 0; ii < cycle_length; ++ii) { + boost::hash_combine( + hash, vertices[(ii + cycle_data.first_vertex_index) % cycle_length]); + } + const auto prev_cycle_citer = m_cycle_with_vertex_hash.find(hash); + if (prev_cycle_citer == m_cycle_with_vertex_hash.cend()) { + m_cycle_with_vertex_hash[hash] = cycle_data; + } else { + // A previous cycle with this hash; but is it equal? + const auto& previous_cycle_data = prev_cycle_citer->second; + const auto& previous_cycle = cycles.at(previous_cycle_data.id); + if (previous_cycle.decrease == cycle.decrease) { + bool equal_vertices = true; + for (size_t ii = 0; ii < cycle_length; ++ii) { + if (previous_cycle.vertices + [(ii + previous_cycle_data.first_vertex_index) % + cycle_length] != + cycle.vertices + [(ii + cycle_data.first_vertex_index) % cycle_length]) { + equal_vertices = false; + break; + } + } + if (equal_vertices) { + // This new cycle is just the previous cycle repeated, + // but starting from a different vertex + continue; + } + } + } + m_cycles_to_keep.push_back(cycle_data.id); + } + return cycle_length; +} + +void CyclesCandidateManager::discard_lower_power_solutions( + const Cycles& cycles) { + int highest_decrease = 0; + for (auto id : m_cycles_to_keep) { + highest_decrease = std::max(highest_decrease, cycles.at(id).decrease); + } + TKET_ASSERT(highest_decrease > 0); + + for (size_t ii = 0; ii < m_cycles_to_keep.size();) { + if (cycles.at(m_cycles_to_keep[ii]).decrease < highest_decrease) { + // This cycle is not good enough. + // Erase this ID, by swapping with the back + m_cycles_to_keep[ii] = m_cycles_to_keep.back(); + m_cycles_to_keep.pop_back(); + continue; + } + // Keep this ID. Onto the next! + ++ii; + } +} + +void CyclesCandidateManager::sort_candidates(const Cycles& cycles) { + // Greedy heuristic: we want the maximal number of disjoint cycles. + // So, choose those which touch few others first. + // Experimentation is needed with other algorithms! + m_touching_data.clear(); + for (size_t ii = 0; ii < m_cycles_to_keep.size(); ++ii) { + // Automatically set to zero on first use. + m_touching_data[m_cycles_to_keep[ii]]; + + for (size_t jj = ii + 1; jj < m_cycles_to_keep.size(); ++jj) { + bool touches = false; + // For short cycles, not much slower than using sets + // or sorted vectors. + for (auto v1 : cycles.at(m_cycles_to_keep[ii]).vertices) { + if (touches) { + break; + } + for (auto v2 : cycles.at(m_cycles_to_keep[jj]).vertices) { + if (v1 == v2) { + touches = true; + break; + } + } + } + if (touches) { + ++m_touching_data[m_cycles_to_keep[ii]]; + ++m_touching_data[m_cycles_to_keep[jj]]; + } + } + } + // Now, sort... + auto& touching_data = m_touching_data; + std::sort( + m_cycles_to_keep.begin(), m_cycles_to_keep.end(), + [&touching_data](Cycles::ID lhs, Cycles::ID rhs) { + const auto lhs_touch_number = touching_data.at(lhs); + const auto rhs_touch_number = touching_data.at(rhs); + + // Don't JUST sort on the touch number, because then the order + // of equal-touch-number elements would be implementation dependent + // (i.e., not a "stable" sort across all platforms/compilers). 
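The determinism remark above, in isolation: std::sort makes no stability guarantee, so equal touch counts are tie-broken on the ID itself to get the same candidate order on every platform; std::stable_sort would be the alternative if insertion order were the preferred tie-break. A standalone check:

#include <algorithm>
#include <cassert>
#include <map>
#include <vector>

int main() {
  std::vector<int> ids{7, 3, 5};
  std::map<int, int> touch{{7, 1}, {3, 0}, {5, 0}};
  std::sort(ids.begin(), ids.end(), [&touch](int lhs, int rhs) {
    return touch.at(lhs) < touch.at(rhs) ||
           (touch.at(lhs) == touch.at(rhs) && lhs < rhs);
  });
  assert((ids == std::vector<int>{3, 5, 7}));  // 3 and 5 tie on touch count; 3 wins by ID
  return 0;
}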
+ return (lhs_touch_number < rhs_touch_number) || + (lhs_touch_number == rhs_touch_number && lhs < rhs); + }); +} + +bool CyclesCandidateManager::should_add_swaps_for_candidate( + const Cycles& cycles, Cycles::ID id) { + const auto& cycle = cycles.at(id); + const auto& vertices = cycle.vertices; + for (auto v : vertices) { + if (m_vertices_used.count(v) != 0) { + return false; + } + } + for (auto v : vertices) { + m_vertices_used.insert(v); + } + return true; +} + +void CyclesCandidateManager::append_partial_solution( + const CyclesGrowthManager& growth_manager, SwapList& swaps, + VertexMapping& vertex_mapping) { + const auto& cycles = growth_manager.get_cycles(); + const size_t cycle_size = fill_initial_cycle_ids(cycles); + + if (m_cycles_to_keep.empty()) { + return; + } + const bool keep_lower_power_solutions = + (cycle_size == 2) + ? m_options.return_all_good_single_swaps + : m_options.return_lower_power_solutions_for_multiswap_candidates; + + if (!keep_lower_power_solutions) { + discard_lower_power_solutions(cycles); + } + sort_candidates(cycles); + m_vertices_used.clear(); + + // It's the final function, so don't bother erasing + // elements in m_cycles_to_keep. + for (auto id : m_cycles_to_keep) { + if (!should_add_swaps_for_candidate(cycles, id)) { + continue; + } + const auto& vertices = cycles.at(id).vertices; + for (size_t ii = vertices.size() - 1; ii > 0; --ii) { + VertexSwapResult(vertices[ii], vertices[ii - 1], vertex_mapping, swaps); + } + } +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclesGrowthManager.cpp b/tket/src/TokenSwapping/CyclesGrowthManager.cpp new file mode 100644 index 0000000000..2797924868 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesGrowthManager.cpp @@ -0,0 +1,196 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "CyclesGrowthManager.hpp" + +#include + +#include "TokenSwapping/DistanceFunctions.hpp" +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +bool Cycle::contains(size_t vertex) const { + for (auto vv : vertices) { + if (vertex == vv) { + return true; + } + } + return false; +} + +CyclesGrowthManager::Options& CyclesGrowthManager::get_options() { + return m_options; +} + +const Cycles& CyclesGrowthManager::get_cycles( + bool throw_if_cycles_are_not_candidates) const { + // GCOVR_EXCL_START + TKET_ASSERT( + !(throw_if_cycles_are_not_candidates && !m_cycles_are_candidates)); + // GCOVR_EXCL_STOP + return m_cycles; +} + +bool CyclesGrowthManager::reset( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours) { + m_cycles.clear(); + m_cycles_are_candidates = false; + + // OK, a bit inefficient, every really good swap (decreasing L by 2) + // will appear twice, but not a disaster. 
+ // If no such swap exists, then this stored data will be necessary, because + // direction matters in longer cycles: v0->v1->v2->v0 is very different + // from v2->v1->v0->v2. + // It's simplest just to treat swaps as special cases of cycles, on 2 + // vertices. + for (const auto& entry : vertex_mapping) { + const auto source = entry.first; + const auto target = entry.second; + const auto source_distance_to_target = distances(source, target); + if (source_distance_to_target == 0) { + continue; + } + const auto& adj_vertices = neighbours(source); + for (auto adj_v : adj_vertices) { + const auto other_v_distance_to_target = distances(adj_v, target); + if (other_v_distance_to_target < source_distance_to_target) { + const auto new_id = m_cycles.emplace_back(); + auto& cycle = m_cycles.at(new_id); + cycle.decrease = 1; + cycle.vertices.resize(2); + cycle.vertices[0] = source; + cycle.vertices[1] = adj_v; + if (m_cycles.size() >= m_options.max_number_of_cycles) { + return true; + } + } + } + } + return !m_cycles.empty(); +} + +bool CyclesGrowthManager::attempt_to_close_cycles( + const VertexMapping& vertex_mapping, DistancesInterface& distances) { + TKET_ASSERT(!m_cycles_are_candidates); + for (auto id_opt = m_cycles.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = m_cycles.next(id); + auto& cycle = m_cycles.at(id); + const int decrease = get_move_decrease( + vertex_mapping, cycle.vertices.back(), cycle.vertices[0], distances); + const int new_decrease = cycle.decrease + decrease; + if (new_decrease > 0) { + cycle.decrease = new_decrease; + if (!m_cycles_are_candidates) { + // It's the first good one, so delete all previous. + for (auto prev_id_opt = m_cycles.previous(id); prev_id_opt;) { + const auto id_to_be_deleted = prev_id_opt.value(); + prev_id_opt = m_cycles.previous(id_to_be_deleted); + m_cycles.erase(id_to_be_deleted); + } + } + m_cycles_are_candidates = true; + } else { + // Not a good closed cycle; do we delete it? + if (m_cycles_are_candidates) { + m_cycles.erase(id); + } + } + } + return m_cycles_are_candidates; +} + +CyclesGrowthManager::GrowthResult CyclesGrowthManager::attempt_to_grow( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours) { + GrowthResult result; + + TKET_ASSERT(!m_cycles.empty()); + + if (m_cycles.front().vertices.size() >= m_options.max_cycle_size) { + m_cycles.clear(); + result.hit_cycle_length_limit = true; + result.empty = true; + return result; + } + for (auto id_opt = m_cycles.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = m_cycles.next(id); + + // Add an arrow onto the back. + const auto back_vertex = m_cycles.at(id).vertices.back(); + const auto& adj_vertices = neighbours(back_vertex); + for (auto adj_v : adj_vertices) { + int new_decr; + { + // Important not to reuse this once cycles are added, + // as it may be invalidated + const auto& cycle = m_cycles.at(id); + if (cycle.contains(adj_v)) { + continue; + } + new_decr = + cycle.decrease + + get_move_decrease(vertex_mapping, back_vertex, adj_v, distances); + + // If there are N moves, each move can only decrease L by at most one, + // so it's unfair to demand a huge L-decrease, because shorter cycles + // would be killed immediately. + // With N vertices there are N-1 moves, but we are about to add + // the new vertex adj_v to this partial cycle (unless we discard it), + // taking it back up to N. 
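+          // Illustration with example option values: if num_moves = 3,
+          // min_decrease_for_partial_path = 1 and
+          // min_power_percentage_for_partial_path = 50, then
+          // min_decrease = max(min(3, 1), ceil(50*3/100)) = 2,
+          // so the grown cycle survives only if new_decr >= 2.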
+ const int num_moves = cycle.vertices.size(); + int min_decrease = num_moves; + min_decrease = + std::min(min_decrease, m_options.min_decrease_for_partial_path); + + // We want 100*(L-decr)/(num.moves) >= + // min_power_percentage_for_partial_path. But we need the ceiling + // because of interger division. + min_decrease = std::max( + min_decrease, + (99 + m_options.min_power_percentage_for_partial_path * num_moves) / + 100); + + if (new_decr < min_decrease) { + continue; + } + } + // A new cycle to be added. Add it before the current position, + // so we won't pass through it again in the main loop. + const auto new_id = m_cycles.insert_before(id); + auto& new_cycle = m_cycles.at(new_id); + new_cycle.decrease = new_decr; + new_cycle.vertices = m_cycles.at(id).vertices; + new_cycle.vertices.push_back(adj_v); + if (m_cycles.size() >= m_options.max_number_of_cycles) { + // Break out of the INNER loop, i.e. neighbours for this + // cycle endpoint. However, this cycle is about to be deleted, + // creating space, so continue with further cycles. + break; + } + } + m_cycles.erase(id); + } + result.empty = m_cycles.empty(); + return result; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclesPartialTsa.cpp b/tket/src/TokenSwapping/CyclesPartialTsa.cpp new file mode 100644 index 0000000000..d6cf13f523 --- /dev/null +++ b/tket/src/TokenSwapping/CyclesPartialTsa.cpp @@ -0,0 +1,96 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "CyclesPartialTsa.hpp" + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +CyclesPartialTsa::CyclesPartialTsa() { m_name = "Cycles"; } + +void CyclesPartialTsa::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + RiverFlowPathFinder& path_finder) { + // We'll add the calculated swaps to the path finder at the end. + // THIS is the right place to do it, not the caller, because + // (as far as the caller knows) it's possible that PartialTSA objects + // reduce/reorder swaps, and so it would be invalid just to go back through + // the appended swaps. However, THIS class knows that no reordering or + // reduction occurs. + const size_t initial_swap_size = swaps.size(); + for (;;) { + const auto swap_size_before = swaps.size(); + single_iteration_partial_solution( + swaps, vertex_mapping, distances, neighbours); + const auto swap_size_after = swaps.size(); + TKET_ASSERT(swap_size_after >= swap_size_before); + if (swap_size_before == swap_size_after) { + break; + } + } + const size_t final_swap_size = swaps.size(); + TKET_ASSERT(initial_swap_size <= final_swap_size); + if (initial_swap_size == final_swap_size) { + return; + } + // At least one swap was added. 
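+  // Walk backwards from the newest swap, registering exactly the
+  // (final_swap_size - initial_swap_size) swaps just appended with the
+  // path finder, so that later path requests tend to reuse these edges.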
+ const auto current_back_id_opt = swaps.back_id(); + TKET_ASSERT(current_back_id_opt); + auto current_id = current_back_id_opt.value(); + for (size_t remaining_swaps = final_swap_size - initial_swap_size;;) { + const auto& swap = swaps.at(current_id); + path_finder.register_edge(swap.first, swap.second); + --remaining_swaps; + if (remaining_swaps == 0) { + break; + } + const auto prev_id_opt = swaps.previous(current_id); + TKET_ASSERT(prev_id_opt); + current_id = prev_id_opt.value(); + } +} + +void CyclesPartialTsa::single_iteration_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours) { + if (!m_growth_manager.reset(vertex_mapping, distances, neighbours)) { + // no solutions. + return; + } + + for (auto infinite_loop_guard = m_growth_manager.get_options().max_cycle_size; + infinite_loop_guard > 0; --infinite_loop_guard) { + if (m_growth_manager.attempt_to_close_cycles(vertex_mapping, distances)) { + // Some solutions found. + m_candidate_manager.append_partial_solution( + m_growth_manager, swaps, vertex_mapping); + return; + } + // No solutions so far, so grow... + const auto growth_result = + m_growth_manager.attempt_to_grow(vertex_mapping, distances, neighbours); + if (growth_result.empty || growth_result.hit_cycle_length_limit) { + return; + } + } + TKET_ASSERT(!"growth_manager termination"); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp new file mode 100644 index 0000000000..88ff1228e6 --- /dev/null +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.cpp @@ -0,0 +1,66 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "CyclicShiftCostEstimate.hpp" + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +CyclicShiftCostEstimate::CyclicShiftCostEstimate( + const std::vector& vertices, DistancesInterface& distances) { + TKET_ASSERT(vertices.size() >= 2); + // We first work out the total distance v(0)->v(1)-> .. -> v(n) -> v(0). + // If we snip out v(i)->v(i+1), the remaining path tells us how many swaps + // we need. So, we must snip out the LARGEST distance(v(i), v(i+1)). + size_t largest_distance = distances(vertices.back(), vertices[0]); + size_t total_distance = largest_distance; + + if (vertices.size() == 2) { + start_v_index = 0; + } else { + // The value i such that distance(v(i), v(i+1)) is largest. + size_t v_index_with_largest_distance = vertices.size() - 1; + for (size_t ii = 0; ii + 1 < vertices.size(); ++ii) { + const auto distance_i = distances(vertices[ii], vertices[ii + 1]); + TKET_ASSERT(distance_i > 0); + total_distance += distance_i; + if (distance_i < largest_distance) { + largest_distance = distance_i; + v_index_with_largest_distance = ii; + } + } + // Now, remove the largest distance again... 
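+    // (For example, for vertices [a,b,c,d]: if the largest step is
+    // d(b,c), we snip it and enact the shift by swapping along the
+    // path starting at c, i.e. c,d,a,b.)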
+ total_distance -= largest_distance; + // We've snipped out (v[i], v[i+1]), so logically we start from v[i+1]. + start_v_index = (v_index_with_largest_distance + 1) % vertices.size(); + } + // To enact an abstract cyclic shift [a,b,c,d], + // choose abstract swaps (cd), (bc), (ab). + // The number of CONCRETE swaps to enact an abstract swap (xy) is + // 2.dist(x,y) - 1. + // e.g., to swap x,y along the path [x,u,v,y], dist(x,y)=3, + // we use 5 concrete vertex swaps (xu), (uv), (vy), (uv), (xu). + // What we've currently stored is the sum of dist(x,y), + // and clearly (sum)(-1) = -(Number of terms in the sum). + estimated_concrete_swaps = 2 * total_distance; + TKET_ASSERT(estimated_concrete_swaps > vertices.size() - 1); + estimated_concrete_swaps -= vertices.size() - 1; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp new file mode 100644 index 0000000000..fa3111afd5 --- /dev/null +++ b/tket/src/TokenSwapping/CyclicShiftCostEstimate.hpp @@ -0,0 +1,76 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "DistancesInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** Used in the TrivialTSA class (NOT in CyclesPartialTsa!) + * Given a desired abstract cyclic shift on [v0, v1, v2, ..., vn], + * i.e. abstract moves v(0)->v(1)->v(2)-> ... ->v(n)->v(0), + * [meaning that v(i), v(i+1) need not actually be adjacent in the graph, + * so we must decide how to represent the desired moves as actual swaps], + * there are n+1 possible obvious ways to enact it + * (and of course, maybe some "nonobvious" ways. + * Finding a good way is, of course, a special case of the Token Swapping + * problem which we're trying to solve!) + * (Of course, also maybe more than n+1 "obvious" ways because paths + * from v[i] to v[i+1] might not be unique). + * + * It's important that the overall effect of the complete cycle + * doesn't move any OTHER tokens, so that we can GUARANTEE that + * the final TrivialTSA solution really does terminate in all cases. + * + * We can "swap along" the path v(i), v(i+1), ..., v(i+n) for any 0 <= i <= n + * (regarding the v indices as wrapping around cyclicly, + * i.e. reducing (i+n) mod (n+1).) + * + * This finds a choice giving the smallest number of concrete swaps, + * assuming no additional swap optimisation, + * and disregarding the tokens on the vertices. + * + * It may not be the genuinely best solution + * because (1) swap sequences can often be optimised; + * (2) some of the swaps may be empty, and hence removable. + * But finding a truly optimal solution, taking these into account, + * is probably about as hard as the general token swapping problem. + */ +struct CyclicShiftCostEstimate { + /** A simple estimate of how many swaps will be needed. 
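+   * (For example: for a cyclic shift on three mutually adjacent vertices
+   * [a,b,c], the two abstract moves remaining after snipping one step have
+   * total distance 2, giving an estimate of 2*2 - 2 = 2 concrete swaps.)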
*/ + size_t estimated_concrete_swaps = 0; + + /** If the stored vertices are v[0], v[1], ..., v[n], + * this is the value of i such that swapping along the abstract path + * v[i], v[i+1], ..., v[i+n] gives the smallest number of swaps. + * (Remembering that each abstract move is v[j] -> v[j+1]). + */ + size_t start_v_index = std::numeric_limits::max(); + + /** Calculate the data upon construction. + * @param vertices The list of vertices, in order, for a cyclic shift. + * Must have size >= 2. + * @param distances An object to calculate distances (we don't need to know + * WHICH path between vertices will be used, at this stage). + */ + CyclicShiftCostEstimate( + const std::vector& vertices, DistancesInterface& distances); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/DistancesInterface.cpp b/tket/src/TokenSwapping/DistancesInterface.cpp new file mode 100644 index 0000000000..2ad47c308b --- /dev/null +++ b/tket/src/TokenSwapping/DistancesInterface.cpp @@ -0,0 +1,36 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "DistancesInterface.hpp" + +using std::vector; + +namespace tket { + +void DistancesInterface::register_shortest_path( + const vector& /*path*/) {} + +void DistancesInterface::register_neighbours( + size_t vertex, const vector& neighbours) { + for (size_t nv : neighbours) { + register_edge(vertex, nv); + } +} + +void DistancesInterface::register_edge(size_t /*vertex1*/, size_t /*vertex2*/) { +} + +DistancesInterface::~DistancesInterface() {} + +} // namespace tket diff --git a/tket/src/TokenSwapping/DynamicTokenTracker.cpp b/tket/src/TokenSwapping/DynamicTokenTracker.cpp new file mode 100644 index 0000000000..18c65ba2ff --- /dev/null +++ b/tket/src/TokenSwapping/DynamicTokenTracker.cpp @@ -0,0 +1,77 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "DynamicTokenTracker.hpp" + +namespace tket { +namespace tsa_internal { + +void DynamicTokenTracker::clear() { m_vertex_to_token.clear(); } + +void DynamicTokenTracker::reset() { + for (auto& entry : m_vertex_to_token) { + entry.second = entry.first; + } +} + +Swap DynamicTokenTracker::do_vertex_swap(const Swap& swap) { + const auto v1 = swap.first; + const auto v2 = swap.second; + const auto t1 = get_token_at_vertex(v1); + const auto t2 = get_token_at_vertex(v2); + m_vertex_to_token[v1] = t2; + m_vertex_to_token[v2] = t1; + return get_swap(t1, t2); +} + +bool DynamicTokenTracker::equal_vertex_permutation_from_swaps( + const DynamicTokenTracker& other) const { + return tokens_here_have_equal_locations_in_the_other_object(other) && + other.tokens_here_have_equal_locations_in_the_other_object(*this); +} + +bool DynamicTokenTracker::tokens_here_have_equal_locations_in_the_other_object( + const DynamicTokenTracker& other) const { + for (const auto& vertex_token_pair : m_vertex_to_token) { + const auto vertex = vertex_token_pair.first; + const auto token = vertex_token_pair.second; + const auto citer = other.m_vertex_to_token.find(vertex); + + if (citer == other.m_vertex_to_token.cend()) { + // If it's unmentioned by the other, then the vertex MUST be fixed + // to give the same permutation. + // Otherwise, the other object doesn't know where the token moved to. + if (vertex != token) { + return false; + } + } else { + if (token != citer->second) { + return false; + } + } + } + return true; +} + +size_t DynamicTokenTracker::get_token_at_vertex(size_t vertex) { + const auto iter = m_vertex_to_token.find(vertex); + if (iter == m_vertex_to_token.end()) { + m_vertex_to_token[vertex] = vertex; + return vertex; + } + return iter->second; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/HybridTsa.cpp b/tket/src/TokenSwapping/HybridTsa.cpp new file mode 100644 index 0000000000..b8b15c39a7 --- /dev/null +++ b/tket/src/TokenSwapping/HybridTsa.cpp @@ -0,0 +1,52 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
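+
+// HybridTsa combines CyclesPartialTsa and TrivialTSA (see README.txt in this
+// directory): each pass runs the cycle-finding TSA and then TrivialTSA in
+// BREAK_AFTER_PROGRESS mode, so the slower trivial algorithm is cut short as
+// soon as it decreases L; passes repeat until every token is home.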
+ +#include "HybridTsa.hpp" + +#include "TokenSwapping/DistanceFunctions.hpp" +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +HybridTsa::HybridTsa() { + m_name = "HybridTsa"; + m_trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); +} + +void HybridTsa::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + RiverFlowPathFinder& path_finder) { + const auto initial_L = get_total_home_distances(vertex_mapping, distances); + for (size_t counter = initial_L + 1; counter > 0; --counter) { + const auto swaps_before = swaps.size(); + m_cycles_tsa.append_partial_solution( + swaps, vertex_mapping, distances, neighbours, path_finder); + + m_trivial_tsa.append_partial_solution( + swaps, vertex_mapping, distances, neighbours, path_finder); + + if (swaps_before == swaps.size()) { + TKET_ASSERT(all_tokens_home(vertex_mapping)); + return; + } + } + TKET_ASSERT(!"hybrid TSA termination"); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/NeighboursInterface.cpp b/tket/src/TokenSwapping/NeighboursInterface.cpp new file mode 100644 index 0000000000..33fc794b9b --- /dev/null +++ b/tket/src/TokenSwapping/NeighboursInterface.cpp @@ -0,0 +1,27 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "NeighboursInterface.hpp" + +#include "Utils/Exceptions.hpp" + +namespace tket { + +const std::vector& NeighboursInterface::operator()(size_t) { + throw NotImplemented("NeighboursInterface::get_neighbours: not implemented"); +} + +NeighboursInterface::~NeighboursInterface() {} + +} // namespace tket diff --git a/tket/src/TokenSwapping/PartialTsaInterface.cpp b/tket/src/TokenSwapping/PartialTsaInterface.cpp new file mode 100644 index 0000000000..f80248db72 --- /dev/null +++ b/tket/src/TokenSwapping/PartialTsaInterface.cpp @@ -0,0 +1,25 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
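+
+// A "partial TSA" (token swapping algorithm) appends zero or more swaps to a
+// SwapList and updates the vertex mapping to match; it is "partial" because a
+// single call is not guaranteed to send every token home (see README.txt).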
+ +#include "PartialTsaInterface.hpp" + +#include "Utils/Exceptions.hpp" + +namespace tket { +namespace tsa_internal { + +const std::string& PartialTsaInterface::name() const { return m_name; } + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/README.txt b/tket/src/TokenSwapping/README.txt new file mode 100644 index 0000000000..0a7d21d8f1 --- /dev/null +++ b/tket/src/TokenSwapping/README.txt @@ -0,0 +1,128 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +Some brief explanation of the Token Swapping algorithms here may be helpful. + +PROBLEM: let G be a graph (undirected, no loops or multiple edges), with labelled distinct tokens (counters) on some or all of the vertices. + +- An allowed move is to choose two adjacent vertices v1, v2 and swap whatever tokens T1, T2 are currently on those vertices. (Or, if one of the vertices, say v2, does not have a token, simply move the token T1 from v1 to v2). + +- We are given a desired final rearrangement of the tokens. + +- The problem is to compute a swap sequence, of shortest length, which will transform the initial token configuration into the desired final configuration. + +- Thus, if every vertex contains a token, we are trying to perform a given permutation on the vertices of the graph. + +- It is not hard to show: that if the graph is connected, then EVERY rearrangement is possible. + +- More generally, a solution exists if and only if: for every token, the initial and final vertex are in the same connected component. + +The 2016 paper "Approximation and Hardness of Token Swapping" by Tillmann Miltzow, Lothar Narins, Yoshio Okamoto, Gunter Rote, Antonis Thomas, Takeaki Uno is very useful. + +One of the main results is an algorithm, based upon finding cycles in the graph, which (in the full case, where every vertex has a token) is guaranteed to use no more than 4x the optimal number of swaps, or 2x for trees. + +The 2019 paper "Circuit Transformations for Quantum Architectures" by Andrew M. Childs, Eddie Schoute and Cem M. Unsal generalises to the partial case (where some vertices might not contain a token). + + +KNOWN RESTRICTIONS: + +If the graph G is not connected, our routines may fail. + +- We plan to fix this; until then, a workaround is to split the problem into connected components. + +- Of course, in real problems, architectures are connected so this doesn't arise. + + +OUR ALGORITHMS: + +Let L be the sum of distances of each token T from its final destination vertex. + +Thus L >= 0 always, and the problem is finished if and only if L=0. + +Thus, the goal of a token swapping algorithm (TSA) is to reduce L to zero, using as few swaps as possible. + +(1) Cycle finding + +We make some observations: + +- The algorithms in the 2016 and 2019 papers try to find a cycle [v(0), v(1), ..., v(n), v(0)] in G. Initially, let vertex v(j) have token T(j). 
The swaps [v(0), v(1)], [v(1), v(2)], ..., [v(n-1), v(n)] then have the effect of performing a cyclic shift: T(0) -> v(n), T(1) -> v(0), T(2) -> v(1), ..., T(n) -> v(n-1). + +- However, we do not need the swap [v(n), v(0)]. Thus, we can perform cyclic shifts along paths [v(0), v(1), ..., v(n)] instead of cycles. + +- The paper algorithms search for cycles where EVERY token move T->v is beneficial, i.e. moves the token T one step closer to its final destination. Thus, the cycle on n vertices reduces L by n, at the cost of n-1 swaps. + +- Cyclic shifts on n vertices may exist which contain swaps which don't decrease L (they might even increase L). However, as long as the overall effect is a decrease in L, the cyclic shift is not bad. + +Thus, our algorithm works by searching for good PATHS instead of cycles; it allows some individual swaps to be bad, as long as the overall effect is still good. + +(2) Alternative moves when cycles fail + +When no good cycles exist, a different move must be performed. The performed move might not decrease L (although L never increases, it may leave L unchanged), and so we must worry whether the algorithm will ever terminate. + +- In the papers, the performed moves are carefully chosen so that theorems guarantee that the algorithm terminates in a reasonable number of moves. + +- We have no such theoretical guarantee. Instead, we perform a "trivial TSA", i.e., a sequence of swaps guaranteed to reduce L to zero eventually, even if the number of swaps is not so good. + +However, notice that we need not perform the "trivial TSA" swaps until the end; we can break off as soon as L decreases, and switch back to the normal cyclic-shift mode, which is expected to reduce L more quickly. + +(3) Additional reductions + +We perform two more reductions, which are new: + +- General swap list optimisation: given a sequence S of swaps, various simple substitutions may reduce the length of S, whilst keeping the overall effect of S unchanged; provided that we only use the same swaps that were present in S, the new sequence is still valid (does not use any further edges of the graph). + +- General table lookup reduction: we have a large precomputed table which contains optimal swap sequences on graphs with <= 6 vertices. Thus, given our computed swap sequence S, we find the vertex mapping between two times, look up an optimal swap sequence for the mapping in the table (using only edges in our given graph, i.e. valid swaps), and replace the swap segment if the new sequence is shorter. + + + +THE MAIN ALGORITHMIC CLASSES: + + +BestFullTsa: + +The main end-to-end mapping. Uses HybridTsa to compute a solution; then general reduction rules (SwapListOptimiser) and table lookups (SwapListSegmentOptimiser, SwapListTableOptimiser) to reduce the swap sequence length, whilst preserving the mapping. + + +HybridTsa: + +Combines CyclesPartialTsa, TrivialTSA to get an overall full TSA (token swapping algorithm). This works by running CyclesPartialTsa whenever possible; switching to TrivialTSA when it gets stuck; and breaking off TrivialTSA and switching back to CyclesPartialTsa when progress is made (i.e., L is decreased). + +Note that HybridTsa also uses simple heuristics about which abstract cycles to perform first, and which abstract swap to omit. + +(I.e., to perform the abstract cyclic shift v0->v1->v2->...->v(n)->v0, we often write [v(n), v(n-1)], [v(n-1), v(n-2)], ..., [v2, v1], [v1, v0]. 
Thus we have OMITTED [v0, v(n)] from the set of swaps [v(i), v(i+1)] for i=0,1,2,...,n, where v(n+1) = v(0) by definition. However, we could have omitted any of the n+1 swaps). + +HybridTsa tries to estimate which ordering is likely to reduce L the quickest; although TrivialTSA works, it is expected to be worse than CyclesPartialTsa when that class works, so we want to break off as soon as possible. + + +CyclesPartialTsa: + +The main "cycle finding" class, corresponding to the cycles in the papers. "Partial" because it is not guaranteed to find a swap sequence. Constructs "concrete" paths, i.e. actual paths in the graph, which give rise to "abstract cycles" (i.e. a cyclic shift, performed by swapping along the path). + + +TrivialTSA: + +Performs any desired vertex permutation, as follows: + +(i) split the permutation into disjoint cycles. (Called "abstract" cycles, because they are unrelated to the cycles in the actual graph. They are TOTALLY UNRELATED to the cycles of "CyclesPartialTsa"! In fact they know nothing of the underlying graph). + +(ii) decompose the abstract cycles into "abstract swaps", i.e. without knowing the edges of the graph, the cyclic shift v0->v1->v2->...->vn->v0 can be rewritten as the abstract swaps [vn, v(n-1)], [v(n-1), v(n-2)], ..., [v2, v1], [v1, v0], which might not be possible in the graph. + +(iii) decompose the abstract swaps into concrete swaps. I.e., choose a path [u, v0, v1, ..., v(n), v] between given (u,v), so that the abstract swap(u,v) can be performed by swapping along the path. + + +RiverFlowPathFinder: + +Actually computes the path required by TrivialTSA, for part (iii). We don't just choose a path at random; we deliberately make the paths overlap as much as possible, for better optimisation later (and tests showed that this really is significant). + + diff --git a/tket/src/TokenSwapping/RiverFlowPathFinder.cpp b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp new file mode 100644 index 0000000000..6c128069a9 --- /dev/null +++ b/tket/src/TokenSwapping/RiverFlowPathFinder.cpp @@ -0,0 +1,190 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "RiverFlowPathFinder.hpp" + +#include +#include + +#include "SwapFunctions.hpp" +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +struct RiverFlowPathFinder::Impl { + DistancesInterface& distances_calculator; + NeighboursInterface& neighbours_calculator; + RNG& rng; + + typedef std::uint64_t EdgeCount; + + /** The key is an undirected edge; the value is the number of times + * that edge was already used in any requested path. + * (So, we favour flows in both directions). + * Overflow is basically impossible, but even if it did occur, + * it would not invalidate the results (it just means that some + * paths might change more than expected). + */ + std::map edge_counts; + + struct ArrowData { + size_t end_vertex; + EdgeCount count; + }; + + /** A work vector. 
When we are trying to expand a path by one step, + * we need to list all those steps which are valid, i.e. reduce + * the distance to the target by one. + */ + vector candidate_moves; + + /// A work vector, will be built up + vector path; + + Impl( + DistancesInterface& distances_interface, + NeighboursInterface& neighbours_interface, RNG& random_generator) + : distances_calculator(distances_interface), + neighbours_calculator(neighbours_interface), + rng(random_generator) {} + + void reset(); + + /// Increases nonempty "path" towards the target vertex. + void grow_path(size_t target_vertex, size_t required_path_size); + + /// Once "path" has been filled, update the counts (so that future paths + /// through similar vertices are more likely to overlap). + void update_data_with_path(); +}; + +void RiverFlowPathFinder::Impl::reset() { + for (auto& entry : edge_counts) { + entry.second = 0; + } + rng.set_seed(); +} + +void RiverFlowPathFinder::Impl::grow_path( + size_t target_vertex, size_t required_path_size) { + TKET_ASSERT(path.size() < required_path_size); + TKET_ASSERT(!path.empty()); + + // We don't yet know how to move on, so we must choose a neighbour. + // All candidates will have the same edge count. + candidate_moves.clear(); + + const auto remaining_distance = required_path_size - path.size(); + const auto& neighbours = neighbours_calculator(path.back()); + distances_calculator.register_neighbours(path.back(), neighbours); + + for (size_t neighbour : neighbours) { + const auto neighbour_distance_to_target = + distances_calculator(neighbour, target_vertex); + + if (neighbour_distance_to_target == remaining_distance - 1) { + // Notice that nonexistent entries will be automatically set + // to have count 0, by the C++ standard. + const auto edge_count = edge_counts[get_swap(path.back(), neighbour)]; + if (!candidate_moves.empty()) { + // We'll only add candidates with the same count or higher. + if (candidate_moves[0].count > edge_count) { + continue; + } + if (candidate_moves[0].count < edge_count) { + candidate_moves.clear(); + } + } + candidate_moves.emplace_back(); + candidate_moves.back().end_vertex = neighbour; + candidate_moves.back().count = edge_count; + continue; + } + // GCOVR_EXCL_START + TKET_ASSERT( + neighbour_distance_to_target == remaining_distance || + neighbour_distance_to_target == remaining_distance + 1 || + AssertMessage() << "d(v_" << path.back() << ", v_" << target_vertex + << ")=" << remaining_distance << ". 
But v_" + << path.back() << " has neighbour v_" << neighbour + << ", at distance " << neighbour_distance_to_target + << " to the target v_" << target_vertex); + // GCOVR_EXCL_STOP + } + // GCOVR_EXCL_START + TKET_ASSERT( + !candidate_moves.empty() || + AssertMessage() << "No neighbours of v_" << path.back() + << " at correct distance " << remaining_distance - 1 + << " to target vertex v_" << target_vertex); + // GCOVR_EXCL_STOP + + const auto& choice = rng.get_element(candidate_moves); + path.push_back(choice.end_vertex); +} + +void RiverFlowPathFinder::Impl::update_data_with_path() { + for (size_t ii = 1; ii < path.size(); ++ii) { + // Nonexistent counts automatically set to 0 initially + ++edge_counts[get_swap(path[ii - 1], path[ii])]; + } + distances_calculator.register_shortest_path(path); +} + +RiverFlowPathFinder::RiverFlowPathFinder( + DistancesInterface& distances_interface, + NeighboursInterface& neighbours_interface, RNG& rng) + : m_pimpl(std::make_unique( + distances_interface, neighbours_interface, rng)) {} + +RiverFlowPathFinder::~RiverFlowPathFinder() {} + +void RiverFlowPathFinder::reset() { m_pimpl->reset(); } + +const vector& RiverFlowPathFinder::operator()( + size_t vertex1, size_t vertex2) { + m_pimpl->path.clear(); + m_pimpl->path.push_back(vertex1); + if (vertex1 == vertex2) { + return m_pimpl->path; + } + + // We must build up the path. + // The number of vertices including the source and target. + const size_t final_path_size = + 1 + m_pimpl->distances_calculator(vertex1, vertex2); + + for (size_t infinite_loop_guard = 10 * final_path_size; + infinite_loop_guard != 0; --infinite_loop_guard) { + m_pimpl->grow_path(vertex2, final_path_size); + if (m_pimpl->path.size() == final_path_size) { + TKET_ASSERT(m_pimpl->path.back() == vertex2); + m_pimpl->update_data_with_path(); + return m_pimpl->path; + } + } + throw std::runtime_error("get path - dropped out of loop"); +} + +void RiverFlowPathFinder::register_edge(size_t vertex1, size_t vertex2) { + // Automatically zero if the edge doesn't exist. + auto& edge_count = m_pimpl->edge_counts[get_swap(vertex1, vertex2)]; + ++edge_count; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/SwapListOptimiser.cpp b/tket/src/TokenSwapping/SwapListOptimiser.cpp new file mode 100644 index 0000000000..eddf3ae63f --- /dev/null +++ b/tket/src/TokenSwapping/SwapListOptimiser.cpp @@ -0,0 +1,297 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "SwapListOptimiser.hpp" + +#include "Utils/Assert.hpp" +#include "VertexSwapResult.hpp" + +namespace tket { +namespace tsa_internal { + +void SwapListOptimiser::push_back(SwapList& list, const Swap& swap) { + if (list.empty() || list.back() != swap) { + list.push_back(swap); + return; + } + list.pop_back(); +} + +// It may be that using a std::set is very slightly quicker +// (to store only the vertices containing tokens, as we don't care about the +// targets). 
However, it's simpler just to use the copied VertexMapping; not +// worth worrying about. (Also, if a std::map is large, then copying all keys +// into a std::set might actually be SLOWER than copying the whole map; +// HOPEFULLY the compiler can copy a whole map very quickly just by copying raw +// bytes, but for a std::set it would have to insert the keys one-by-one and do +// a lot of tree rebalancing). +void SwapListOptimiser::optimise_pass_remove_empty_swaps( + SwapList& list, VertexMapping vertex_mapping) { + auto id_opt = list.front_id(); + while (id_opt) { + const auto id = id_opt.value(); + id_opt = list.next(id); + const VertexSwapResult result(list.at(id), vertex_mapping); + if (result.tokens_moved == 0) { + list.erase(id); + } + } +} + +std::optional +SwapListOptimiser::get_id_of_previous_blocker(SwapList& list, SwapID id) { + const auto& initial_swap = list.at(id); + + // This is the first non-disjoint swap it hits when it moves back. + // Guaranteed to be valid if we drop out of the loop. + SwapID current_id = id; + + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto prev_id = list.previous(current_id); + if (!prev_id) { + // Right at the front! + return {}; + } + current_id = prev_id.value(); + const auto& new_swap = list.at(current_id); + if (!disjoint(initial_swap, new_swap)) { + // Blocks, OR identical + if (new_swap != initial_swap) { + // It blocks + return current_id; + } + terminated_correctly = true; + break; + } + } + TKET_ASSERT(terminated_correctly); + // It's hit a copy of itself + list.erase(id); + list.erase(current_id); + return {}; +} + +bool SwapListOptimiser::move_swap_towards_front(SwapList& list, SwapID id) { + TKET_ASSERT(list.front_id()); + if (id == list.front_id().value()) { + return false; + } + const auto old_size = list.size(); + const auto previous_blocker_opt = get_id_of_previous_blocker(list, id); + if (old_size != list.size()) { + // The swap was erased! + return true; + } + if (previous_blocker_opt) { + // It can't move all the way to the front. + const ID blocker = previous_blocker_opt.value(); + + // Must be non-null. + const ID previous_id = list.previous(id).value(); + if (blocker != previous_id) { + // Do the move...erase before insert to minimise possible sizes... + const auto swap = list.at(id); + list.erase(id); + const auto new_id = list.insert_after(blocker); + list.at(new_id) = swap; + } + return false; + } + // There was no blocker, so we CAN move all the way to the front + // (and we checked before that we're not already at the front). + const auto swap = list.at(id); + list.erase(id); + list.push_front(swap); + return false; +} + +void SwapListOptimiser::optimise_pass_with_zero_travel(SwapList& list) { + if (list.size() <= 1) { + return; + } + ID current_id = list.front_id().value(); + + // This moves swaps to cancel with previous identical swaps, + // if there is nothing blocking the move. + // However, only worth doing if previous identical swaps do exist; + // leaves them unchanged otherwise. + // We can be sneaky: rather than storing all previous IDs + // for each swap, we store the NUMBER of them; we don't need + // to know the previous location, since the move back + // will check for that anyway. + // + // This probably could be cleverly optimised further + // but would require more thought. + for (auto& entry : m_data) { + // This is quicker than clearing and reinserting; + // no tree rebalancing. 
+ entry.second = 0; + } + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto next_id_opt = list.next(current_id); + + // C++ guarantees nonexistent values will be set to 0. + auto& swap_count = m_data[list.at(current_id)]; + if (swap_count == 0) { + swap_count = 1; + } else { + // There's a possibility of cancellation. + const auto old_size = list.size(); + (void)get_id_of_previous_blocker(list, current_id); + if (old_size == list.size()) { + // No cancellation. + ++swap_count; + } else { + // Cancellation occurred; "get_id_of_previous_blocker" already erased + // both vertex swaps, but didn't update the counts. + --swap_count; + } + } + if (!next_id_opt) { + return; + } + current_id = next_id_opt.value(); + } + TKET_ASSERT(!"optimise_pass_with_zero_travel termination"); +} + +void SwapListOptimiser::optimise_pass_with_frontward_travel(SwapList& list) { + if (list.size() <= 1) { + return; + } + // Start one past the front. + ID current_id = list.front_id().value(); + current_id = list.next(current_id).value(); + + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto next_id_opt = list.next(current_id); + move_swap_towards_front(list, current_id); + if (!next_id_opt) { + return; + } + current_id = next_id_opt.value(); + } + TKET_ASSERT(!"optimise_pass_with_frontward_travel termination"); +} + +void SwapListOptimiser::optimise_pass_with_token_tracking(SwapList& list) { + if (list.size() <= 1) { + return; + } + m_token_tracker.clear(); + optimise_pass_with_token_tracking_without_clearing_tracker(list); +} + +void SwapListOptimiser:: + optimise_pass_with_token_tracking_without_clearing_tracker(SwapList& list) { + if (list.size() <= 1) { + return; + } + // Put a different token at each vertex, and start swapping. + // Now, if a TOKEN swap (rather than vertex swap) + // repeats, then removing this vertex swap together with the preceding one + // in which those two tokens were exchanged gives the same final result. + // This is because, if we don't actually carry out the first swap, + // everything proceeds as before, and all tokens except those two + // are in the same place. When we reach the time of the second swap, + // everything is as before EXCEPT that the two tokens have changed places; + // thus the effect of the second swap + // was merely to interchange those two tokens again. + // + // Now, m_data will store the previous LOCATIONS of vertex swaps. + // + // The actual values of the tokens are irrelevant, + // as long as they are distinct. + + const auto invalid_index = VectorListHybridSkeleton::get_invalid_index(); + + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + // Keep looping until we stop changing. + // The size is always decreasing or unchanged; + // we never insert, only erase. + const auto old_size = list.size(); + if (old_size == 0) { + return; + } + for (auto& entry : m_data) { + entry.second = invalid_index; + } + ID current_id = list.front_id().value(); + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto& vertex_swap = list.at(current_id); + const auto token_swap = m_token_tracker.do_vertex_swap(vertex_swap); + const auto citer = m_data.find(token_swap); + if (citer != m_data.cend() && citer->second != invalid_index) { + // The swap occurred before, the entry tells us the ID. 
+ // Erase both swaps. + list.erase(citer->second); + list.erase(current_id); + // We have to start at the beginning. + // Changing the labels for these tokens + // messes up other entries between the two swaps. + // A warm restart from the middle of the swap list + // would be a lot of extra complication, not worth it for now. + terminated_correctly = true; + break; + } + // Swap hasn't occurred before, now advance. + m_data[token_swap] = current_id; + const auto next_id_opt = list.next(current_id); + if (!next_id_opt) { + terminated_correctly = true; + break; + } + current_id = next_id_opt.value(); + } + TKET_ASSERT(terminated_correctly); + const auto new_size = list.size(); + if (old_size == new_size) { + return; + } + TKET_ASSERT(new_size < old_size); + } + TKET_ASSERT(!"optimise_pass_with_token_tracking termination"); +} + +void SwapListOptimiser::full_optimise(SwapList& list) { + // More experimentation needed to find the best combination. + optimise_pass_with_zero_travel(list); + m_token_tracker.reset(); + optimise_pass_with_token_tracking_without_clearing_tracker(list); +} + +void SwapListOptimiser::full_optimise( + SwapList& list, const VertexMapping& vertex_mapping) { + for (auto infinite_loop_guard = 1 + list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = list.size(); + full_optimise(list); + optimise_pass_remove_empty_swaps(list, vertex_mapping); + if (old_size == list.size() || list.size() == 0) { + return; + } + TKET_ASSERT(list.size() < old_size); + } + TKET_ASSERT(!"full_optimise termination"); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp new file mode 100644 index 0000000000..cc16d974cd --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/DistanceFunctions.cpp @@ -0,0 +1,54 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
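+
+// Helper functions involving L, the sum over all tokens of the graph distance
+// from the token's current vertex to its target vertex (see README.txt):
+// L == 0 exactly when all tokens are home, and get_move_decrease /
+// get_swap_decrease report how much a single move or swap would decrease L.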
+ +#include "TokenSwapping/DistanceFunctions.hpp" + +#include +#include + +namespace tket { +namespace tsa_internal { + +size_t get_total_home_distances( + const VertexMapping& vertex_mapping, + DistancesInterface& distances_calculator) { + size_t sum_of_distances = 0; + for (const auto& entry : vertex_mapping) { + sum_of_distances += distances_calculator(entry.first, entry.second); + } + return sum_of_distances; +} + +int get_move_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances) { + const auto citer = vertex_mapping.find(v1); + if (citer == vertex_mapping.cend()) { + return 0; + } + const auto target = citer->second; + const std::intmax_t v1_to_target = distances(v1, target); + const std::intmax_t v2_to_target = distances(v2, target); + return static_cast(v1_to_target - v2_to_target); +} + +int get_swap_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances) { + return get_move_decrease(vertex_mapping, v1, v2, distances) + + get_move_decrease(vertex_mapping, v2, v1, distances); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp new file mode 100644 index 0000000000..108cb9339f --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/SwapFunctions.cpp @@ -0,0 +1,39 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/SwapFunctions.hpp" + +#include +#include + +namespace tket { + +Swap get_swap(size_t v1, size_t v2) { + if (v1 == v2) { + std::stringstream ss; + ss << "get_swap : for equal vertices v1 = v2 = v_" << v1; + throw std::runtime_error(ss.str()); + } + if (v1 < v2) { + return std::make_pair(v1, v2); + } + return std::make_pair(v2, v1); +} + +bool disjoint(const Swap& s1, const Swap& s2) { + return s1.first != s2.first && s1.first != s2.second && + s1.second != s2.first && s1.second != s2.second; +} + +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp new file mode 100644 index 0000000000..18edbac349 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/VertexMappingFunctions.cpp @@ -0,0 +1,94 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
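+
+// Utilities for VertexMapping objects, which map each vertex currently holding
+// a token to that token's target vertex. check_mapping asserts that no two
+// tokens share a target, i.e. that the mapping really is (part of) a
+// permutation.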
+ +#include "TokenSwapping/VertexMappingFunctions.hpp" + +#include +#include + +#include "TokenSwapping/VertexSwapResult.hpp" +#include "Utils/Assert.hpp" + +namespace tket { + +using namespace tsa_internal; + +bool all_tokens_home(const VertexMapping& vertex_mapping) { + for (const auto& entry : vertex_mapping) { + if (entry.first != entry.second) { + return false; + } + } + return true; +} + +void check_mapping( + const VertexMapping& vertex_mapping, VertexMapping& work_mapping) { + work_mapping.clear(); + for (const auto& entry : vertex_mapping) { + // GCOVR_EXCL_START + TKET_ASSERT( + work_mapping.count(entry.second) == 0 || + AssertMessage() << "Vertices v_" << entry.first << " and v_" + << work_mapping[entry.second] + << " both have the same target vertex v_" + << entry.second); + // GCOVR_EXCL_STOP + work_mapping[entry.second] = entry.first; + } +} + +void check_mapping(const VertexMapping& vertex_mapping) { + VertexMapping work_mapping; + check_mapping(vertex_mapping, work_mapping); +} + +void append_swaps_to_interchange_path_ends( + const std::vector& path, VertexMapping& vertex_mapping, + SwapList& swap_list) { + if (path.size() < 2 || path.front() == path.back()) { + return; + } + for (size_t ii = path.size() - 1; ii > 0; --ii) { + VertexSwapResult(path[ii], path[ii - 1], vertex_mapping, swap_list); + } + for (size_t ii = 2; ii < path.size(); ++ii) { + VertexSwapResult(path[ii], path[ii - 1], vertex_mapping, swap_list); + } +} + +size_t get_source_vertex( + VertexMapping& source_to_target_map, size_t target_vertex) { + if (source_to_target_map.count(target_vertex) == 0) { + // If it IS a genuine permutation mapping (which we assume), + // then the vertex is as yet unmentioned (and hence unmoved). + source_to_target_map[target_vertex] = target_vertex; + return target_vertex; + } + for (const auto& entry : source_to_target_map) { + if (entry.second == target_vertex) { + return entry.first; + } + } + TKET_ASSERT(!"get_source_vertex"); + return target_vertex; +} + +void add_swap(VertexMapping& source_to_target_map, const Swap& swap) { + const auto source_v1 = get_source_vertex(source_to_target_map, swap.first); + const auto source_v2 = get_source_vertex(source_to_target_map, swap.second); + std::swap(source_to_target_map[source_v1], source_to_target_map[source_v2]); +} + +} // namespace tket diff --git a/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp new file mode 100644 index 0000000000..b3ea011b44 --- /dev/null +++ b/tket/src/TokenSwapping/TSAUtils/VertexSwapResult.cpp @@ -0,0 +1,58 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
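+
+// VertexSwapResult applies a single swap of two vertices to a VertexMapping,
+// recording how many tokens actually moved (0, 1 or 2); the constructor taking
+// a SwapList appends the swap to the list only when at least one token moved.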
+ +#include "TokenSwapping/VertexSwapResult.hpp" + +namespace tket { +namespace tsa_internal { + +VertexSwapResult::VertexSwapResult( + size_t v1, size_t v2, VertexMapping& vertex_mapping, SwapList& swap_list) + : VertexSwapResult(v1, v2, vertex_mapping) { + if (tokens_moved != 0) { + swap_list.push_back(get_swap(v1, v2)); + } +} + +VertexSwapResult::VertexSwapResult( + const Swap& swap, VertexMapping& vertex_mapping) + : VertexSwapResult(swap.first, swap.second, vertex_mapping) {} + +VertexSwapResult::VertexSwapResult( + size_t v1, size_t v2, VertexMapping& vertex_mapping) { + if (vertex_mapping.count(v1) == 0) { + if (vertex_mapping.count(v2) == 0) { + tokens_moved = 0; + return; + } + // No token on the first. + vertex_mapping[v1] = vertex_mapping[v2]; + vertex_mapping.erase(v2); + tokens_moved = 1; + return; + } + // A token on the first. + if (vertex_mapping.count(v2) == 0) { + vertex_mapping[v2] = vertex_mapping[v1]; + vertex_mapping.erase(v1); + tokens_moved = 1; + return; + } + // Tokens on both. + std::swap(vertex_mapping[v1], vertex_mapping[v2]); + tokens_moved = 2; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp new file mode 100644 index 0000000000..558f3b712f --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/CanonicalRelabelling.cpp @@ -0,0 +1,130 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/CanonicalRelabelling.hpp" + +#include +#include + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +CanonicalRelabelling::CanonicalRelabelling() { + // no more than 6 vertices, so no more than 6 cycles ever needed. + m_cycles.resize(6); +} + +const CanonicalRelabelling::Result& CanonicalRelabelling::operator()( + const VertexMapping& desired_mapping) { + m_result.too_many_vertices = false; + m_result.permutation_hash = 0; + m_result.new_to_old_vertices.clear(); + m_result.old_to_new_vertices.clear(); + m_result.identity = all_tokens_home(desired_mapping); + if (m_result.identity) { + return m_result; + } + check_mapping(desired_mapping, m_work_mapping); + if (desired_mapping.size() > 6) { + m_result.too_many_vertices = true; + return m_result; + } + // If not the identity, at least 2 vertices moved. 
+ TKET_ASSERT(desired_mapping.size() >= 2); + TKET_ASSERT(desired_mapping.size() <= 6); + + m_desired_mapping = desired_mapping; + unsigned next_cyc_index = 0; + + while (!m_desired_mapping.empty()) { + // New cycle starts + auto& this_cycle = m_cycles[next_cyc_index]; + ++next_cyc_index; + this_cycle.clear(); + this_cycle.push_back(m_desired_mapping.cbegin()->first); + bool terminated_correctly = false; + for (unsigned infinite_loop_guard = 1 + m_desired_mapping.size(); + infinite_loop_guard != 0; --infinite_loop_guard) { + const auto curr_v = this_cycle.back(); + const auto target_v = m_desired_mapping.at(curr_v); + TKET_ASSERT(m_desired_mapping.erase(curr_v) == 1); + if (target_v == this_cycle[0]) { + terminated_correctly = true; + break; + } + this_cycle.push_back(target_v); + } + TKET_ASSERT(terminated_correctly); + } + // Sort by cycle length, LONGEST cycles first. + // But, also want a "stable-like" sort: + // make a consistent choice across all platforms, + // if cycle lengths are equal, + // based only upon the vertex numbers. + m_sorted_cycles_indices.resize(next_cyc_index); + std::iota(m_sorted_cycles_indices.begin(), m_sorted_cycles_indices.end(), 0); + const auto& cycles = m_cycles; + + std::sort( + m_sorted_cycles_indices.begin(), m_sorted_cycles_indices.end(), + [cycles](unsigned ii, unsigned jj) { + const auto& cyc1 = cycles[ii]; + const auto& cyc2 = cycles[jj]; + return (cyc1.size() > cyc2.size()) || + // Using the raw vertex numbers is, of course, non-canonical, + // but necessary if we are to have stable results + // across ALL nonstable sorting algorithms + // on different platforms/compilers. + ((cyc1.size() == cyc2.size()) && cyc1[0] < cyc2[0]); + }); + + // Now we can set up the mapping. + m_result.new_to_old_vertices.clear(); + for (auto ii : m_sorted_cycles_indices) { + const auto& cyc = m_cycles[ii]; + TKET_ASSERT(!cyc.empty()); + TKET_ASSERT(cyc.size() <= 6); + for (size_t old_v : cyc) { + m_result.new_to_old_vertices.push_back(old_v); + } + } + TKET_ASSERT(m_result.new_to_old_vertices.size() <= 6); + m_result.old_to_new_vertices.clear(); + for (unsigned ii = 0; ii < m_result.new_to_old_vertices.size(); ++ii) { + m_result.old_to_new_vertices[m_result.new_to_old_vertices[ii]] = ii; + } + // GCOVR_EXCL_START + TKET_ASSERT( + m_result.new_to_old_vertices.size() == + m_result.old_to_new_vertices.size()); + // GCOVR_EXCL_STOP + // And finally, the permutation hash. + m_result.permutation_hash = 0; + for (auto ii : m_sorted_cycles_indices) { + const auto& cyc = m_cycles[ii]; + if (cyc.size() == 1) { + break; + } + m_result.permutation_hash *= 10; + m_result.permutation_hash += cyc.size(); + } + return m_result; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp new file mode 100644 index 0000000000..19a0a14c38 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/ExactMappingLookup.cpp @@ -0,0 +1,140 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/ExactMappingLookup.hpp" + +#include + +#include "TokenSwapping/FilteredSwapSequences.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/SwapConversion.hpp" +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +const ExactMappingLookup::Result& ExactMappingLookup::operator()( + const VertexMapping& desired_mapping, const vector& edges, + unsigned max_number_of_swaps) { + m_result.success = false; + m_result.too_many_vertices = desired_mapping.size() > 6; + m_result.swaps.clear(); + if (m_result.too_many_vertices) { + return m_result; + } + return improve_upon_existing_result( + desired_mapping, edges, max_number_of_swaps); +} + +const ExactMappingLookup::Result& +ExactMappingLookup::improve_upon_existing_result( + const VertexMapping& desired_mapping, const vector& edges, + unsigned max_number_of_swaps) { + max_number_of_swaps = std::min(max_number_of_swaps, 16u); + const auto& relabelling = m_relabeller(desired_mapping); + + if (relabelling.identity) { + // This beats whatever was there before, + // whether or not it was successful. + m_result.success = true; + m_result.too_many_vertices = false; + m_result.swaps.clear(); + return m_result; + } + if (relabelling.too_many_vertices) { + // We cannot get a new result, so just return the existing one, whether or + // not it succeeded. + if (!m_result.success) { + m_result.too_many_vertices = true; + } + return m_result; + } + TKET_ASSERT(relabelling.permutation_hash != 0); + { + const bool size_match = relabelling.new_to_old_vertices.size() == + relabelling.old_to_new_vertices.size(); + TKET_ASSERT(size_match); + } + TKET_ASSERT(relabelling.new_to_old_vertices.size() >= 2); + + fill_result_from_table(relabelling, edges, max_number_of_swaps); + return m_result; +} + +void ExactMappingLookup::fill_result_from_table( + const CanonicalRelabelling::Result& relabelling_result, + const vector& old_edges, unsigned max_number_of_swaps) { + if (m_result.success) { + if (m_result.swaps.empty()) { + return; + } + max_number_of_swaps = + std::min(max_number_of_swaps, m_result.swaps.size() - 1); + if (max_number_of_swaps == 0) { + return; + } + } else { + m_result.swaps.clear(); + } + SwapConversion::EdgesBitset new_edges_bitset = 0; + + for (auto old_edge : old_edges) { + const auto new_v1_opt = get_optional_value( + relabelling_result.old_to_new_vertices, old_edge.first); + if (!new_v1_opt) { + continue; + } + const auto new_v2_opt = get_optional_value( + relabelling_result.old_to_new_vertices, old_edge.second); + if (!new_v2_opt) { + continue; + } + const auto new_v1 = new_v1_opt.value(); + const auto new_v2 = new_v2_opt.value(); + TKET_ASSERT(new_v1 <= 5); + TKET_ASSERT(new_v2 <= 5); + new_edges_bitset |= SwapConversion::get_edges_bitset( + SwapConversion::get_hash_from_swap(get_swap(new_v1, new_v2))); + } + + const FilteredSwapSequences::SingleSequenceData table_result( + relabelling_result.permutation_hash, new_edges_bitset, + max_number_of_swaps); + + TKET_ASSERT(table_result.number_of_swaps > 0); + if (table_result.number_of_swaps > max_number_of_swaps) { + // No result in the table. 
+ return; + } + TKET_ASSERT(table_result.edges_bitset != 0); + TKET_ASSERT(table_result.swaps_code > 0); + + m_result.success = true; + m_result.swaps.clear(); + auto swaps_code_copy = table_result.swaps_code; + while (swaps_code_copy != 0) { + const auto& new_swap = + SwapConversion::get_swap_from_hash(swaps_code_copy & 0xF); + swaps_code_copy >>= 4; + m_result.swaps.push_back(get_swap( + relabelling_result.new_to_old_vertices.at(new_swap.first), + relabelling_result.new_to_old_vertices.at(new_swap.second))); + } + TKET_ASSERT(m_result.swaps.size() <= 16); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp new file mode 100644 index 0000000000..6960cf1b61 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/FilteredSwapSequences.cpp @@ -0,0 +1,273 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/FilteredSwapSequences.hpp" + +#include + +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/SwapSequenceTable.hpp" +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +/* +NOTE: the problem is: given a bitset, i.e. an unsigned int representing a set, +design a map-type data structure whose keys are unsigned integers representing +bitsets, and values are a collection of entries using that bitset (i.e., only +using swaps whose index in a global vector of allowed swaps has a "one" in the +appropriate position in the binary expansion of the bitset). + +We must be able to look up all entries whose key is a SUBSET of the given set. +(And then, search further through those values). + +We tried various things, e.g. sorting by key, using the fact that + +(X is a subset of Y) ==> (x <= y) + +where X,Y are subsets and x,y are the integers representing them; thus you can +do a kind of binary search. + +If you want the SMALLEST value for a given key, you can sort them also and do a +kind of double binary search. (Another crucial point: when searching between two +key ranges in a sorted VECTOR of keys, you can determine how many keys exist in +the range in O(log N) time, rather than O(N) time for a map). + +These fancy algorithms are all asymptotically much more efficient than the +obvious O(N) lookup, which just goes through EVERY key and checks if it's a +subset or not, then goes through every element. + +HOWEVER, experiments showed that the fancy algorithms are actually quite a bit +slower than the obvious algorithm for the table size we care about. + +*/ + +FilteredSwapSequences::SingleSequenceData::SingleSequenceData() + : edges_bitset(0), + swaps_code(0), + number_of_swaps(std::numeric_limits::max()) {} + +/* +If the entries are distributed "randomly" and fairly uniformly amongst the +bitset keys, i.e. 
given a bitset, look up all keys which are a subset of that, +then asymptotically using many bits in the keys is good. + +For our table sizes, experiments suggested that it's worth having 1 bit in each +bitset key (2 min for 1 bit vs. 2 min 20 sec for no bits in one test), rather +then no keys at all, BUT not worth more than 1 bit in each key. + +e.g., for 15 bits in each bitset, each of the 15 keys being one of the bits +(we have no empty keys - pointless trying to look up swap sequences if the graph +has no edges!), assume that an average lookup query contains 5 bits. Then 10/15 += 2/3 of the keys are disjoint from it, and so most of the keys immediately can +be ruled out. + +However, it's a balancing act: if you have too many keys, then the lists for +each key become so short then you're effectively almost doing a linear search +through all entries. +*/ + +void FilteredSwapSequences::initialise( + std::vector codes) { + // Can only initialise once. + TKET_ASSERT(m_internal_data.empty()); + std::sort(codes.begin(), codes.end()); + TKET_ASSERT(!codes.empty()); + TKET_ASSERT(codes[0] != 0); + TrimmedSingleSequenceData datum; + + for (size_t ii = 0; ii < codes.size(); ++ii) { + if (ii != 0 && codes[ii] == codes[ii - 1]) { + // Filter out duplicate entries. + continue; + } + datum.swaps_code = codes[ii]; + datum.edges_bitset = SwapConversion::get_edges_bitset(datum.swaps_code); + push_back(datum); + } +} + +void FilteredSwapSequences::push_back(TrimmedSingleSequenceData datum) { + auto bitset_copy = datum.edges_bitset; + TKET_ASSERT(bitset_copy != 0); + SwapConversion::EdgesBitset bit_to_use = 0; + + // We want to add to the smallest list, to keep the data balanced. + // Tests showed that this works well; the entries are distributed + // very close to uniformly amongst the 15 possible keys. + // + // This is maybe surprising, because you'd expect + // more bias: you'd expect, due to the relabelling scheme, the table to have + // swaps like (0,1), (0,2) much more frequently than higher-numbered + // vertices like (4,5). This may or may not be the case, but whatever + // the truth, there are still enough bits available overall to break + // the entries up well enough). + size_t list_size_to_use = std::numeric_limits::max(); + + while (bitset_copy != 0) { + const auto new_bit = get_rightmost_bit(bitset_copy); + // If the key does not exist, the newly created empty list will + // immediately be filled; so no key is wasted. (They're not wasted anyway, + // the table entries are very close to uniformly distributed + // amongst all 15 keys). + const auto list_size = m_internal_data[new_bit].size(); + + if (list_size < list_size_to_use) { + list_size_to_use = list_size; + bit_to_use = new_bit; + if (list_size == 0) { + break; + } + } + } + TKET_ASSERT(bit_to_use != 0); + m_internal_data[bit_to_use].push_back(datum); +} + +FilteredSwapSequences::SingleSequenceData +FilteredSwapSequences::get_lookup_result( + SwapConversion::EdgesBitset edges_bitset, unsigned max_num_swaps) const { + // NOTE: this algorithm is quite crude, BUT it's so simple that + // apparently clever algorithms, although asymptotically more efficient, + // appear to be slower. + // The clever algorithms seem only worth doing if the table becomes + // much larger, >> 100 codes for each bit at least. + + max_num_swaps = std::min(max_num_swaps, 16u); + + // Value 0xFFF...F will never occur, + // because this would be 16 consecutive equal swaps...! 
+ const auto impossible_max_code = + std::numeric_limits::max(); + + // Stop as soon as the swaps code gets too big. + SwapConversion::SwapHash max_code; + if (max_num_swaps == 16) { + max_code = impossible_max_code; + } else { + max_code = 1; + max_code <<= (4 * max_num_swaps); + --max_code; + } + TrimmedSingleSequenceData best_datum; + best_datum.swaps_code = impossible_max_code; + + for (const auto& entry : m_internal_data) { + if (entry.first > edges_bitset) { + // The swaps used by a sequence must be a SUBSET of the allowable edges. + // Therefore, the swaps bitset must be <= the edges bitset. + // Of course, it's a MAP, so the swaps bitsets are already in increasing + // order. + break; + } + if ((entry.first & edges_bitset) != entry.first) { + // Every swap sequence in this entry contains ALL of the given edges + // in the bitset key (as well as others), and thus it MUST be a subset + // of the given edges_bitset, otherwise the entire entry + // can be skipped. + continue; + } + const auto& list = entry.second; + for (const auto& single_entry : list) { + if (single_entry.swaps_code > max_code || + single_entry.swaps_code >= best_datum.swaps_code) { + // Because they're sorted by code value, + // all subsequent entries will be too big also. + break; + } + if ((single_entry.edges_bitset & edges_bitset) != + single_entry.edges_bitset) { + // The EXACT set of edges used must be a subset of edges_bitset, + // otherwise it's unsuitable - it uses a swap not allowed. + continue; + } + best_datum = single_entry; + } + } + + SingleSequenceData result; + if (best_datum.swaps_code < impossible_max_code) { + // We actually got a result. + result.edges_bitset = best_datum.edges_bitset; + result.swaps_code = best_datum.swaps_code; + result.number_of_swaps = + SwapConversion::get_number_of_swaps(result.swaps_code); + } + return result; +} + +size_t FilteredSwapSequences::get_total_number_of_entries() const { + size_t total = 0; + for (const auto& entry : m_internal_data) { + total += entry.second.size(); + } + return total; +} + +// Convert the raw SwapSequenceTable object into +// FilteredSwapSequences-compatible data. The key is the permutation hash; the +// value is the lookup object which can find solutions to given problems. +static std::map +construct_and_return_full_table() { + std::map result; + const auto raw_table = SwapSequenceTable::get_table(); + for (const auto& entry : raw_table) { + // The simplest nontrivial permutation arises from a single swap (a,b), + // which under the canonical relabelling is converted to (01), + // which has hash 2. + TKET_ASSERT(entry.first >= 2); + // The largest possible hash comes from (01)(23)(45). + TKET_ASSERT(entry.first <= 222); + result[entry.first].initialise(entry.second); + } + return result; +} + +static const std::map& get_full_table() { + static const auto full_table(construct_and_return_full_table()); + return full_table; +} + +FilteredSwapSequences::SingleSequenceData::SingleSequenceData( + unsigned permutation_hash, SwapConversion::EdgesBitset edges_bitset, + unsigned max_number_of_swaps) + : SingleSequenceData() { + if (permutation_hash == 0) { + // The identity mapping, always possible. + number_of_swaps = 0; + return; + } + if (edges_bitset == 0) { + // No swaps at all! This CAN happen...it just means that + // we haven't seen enough vertices to connect up the given ones; + // all solutions involve swaps using other vertices not yet seen + // (i.e., not in this subgraph). + // But it's not the identity, therefore it's impossible. 
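(Aside: the permutation_hash being looked up here is just the list of non-trivial cycle lengths, longest first, concatenated as decimal digits, exactly as built in CanonicalRelabelling above; so a single transposition hashes to 2 and (01)(23)(45) hashes to 222. A tiny sketch with an illustrative helper name, not part of the tket API:)

#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

// Illustrative only: combine cycle lengths, sorted longest first,
// into a decimal "permutation hash".
unsigned permutation_hash(std::vector<unsigned> cycle_lengths) {
  std::sort(
      cycle_lengths.begin(), cycle_lengths.end(), std::greater<unsigned>());
  unsigned hash = 0;
  for (unsigned length : cycle_lengths) {
    if (length <= 1) break;  // fixed points do not contribute
    hash = hash * 10 + length;
  }
  return hash;
}

int main() {
  std::cout << permutation_hash({2}) << '\n';        // single transposition: 2
  std::cout << permutation_hash({2, 3}) << '\n';     // a 3-cycle and a 2-cycle: 32
  std::cout << permutation_hash({2, 2, 2}) << '\n';  // (01)(23)(45): 222
  return 0;
}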
+ return; + } + + const auto& table = get_full_table(); + const auto citer = table.find(permutation_hash); + if (citer == table.cend()) { + // No result in the table. + return; + } + *this = citer->second.get_lookup_result(edges_bitset, max_number_of_swaps); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp new file mode 100644 index 0000000000..d5a0239f4a --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/PartialMappingLookup.cpp @@ -0,0 +1,94 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/PartialMappingLookup.hpp" + +#include <algorithm> + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +const ExactMappingLookup::Result& PartialMappingLookup::operator()( + const VertexMapping& desired_mapping, const vector<Swap>& edges, + const std::set<size_t>& vertices_with_tokens_at_start, + unsigned max_number_of_swaps) { + const auto& exact_mapping_result = + m_exact_mapping_lookup(desired_mapping, edges, max_number_of_swaps); + + if (exact_mapping_result.success && exact_mapping_result.swaps.empty()) { + return exact_mapping_result; + } + + // Are there any empty vertices? + m_empty_source_vertices.clear(); + m_empty_target_vertices.clear(); + for (const auto& entry : desired_mapping) { + if (vertices_with_tokens_at_start.count(entry.first) == 0) { + m_empty_source_vertices.push_back(entry.first); + m_empty_target_vertices.push_back(entry.second); + } + } + if (m_empty_source_vertices.size() <= 1) { + // Only an exact lookup is needed (or possible). + return exact_mapping_result; + } + + // There are some empty vertices at the start. + // These END UP at empty target vertices + // (which, of course, might be completely different!) + // For next_permutation, let's permute the empty SOURCE vertices. + // They are already sorted, thus already at the first permutation + // in the ordering, because they came from the keys of desired_mapping.
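(Aside: the permutation step described in the comment above is plain std::next_permutation over the sorted empty source vertices; each arrangement pairs them off against the empty target slots. A minimal sketch with made-up vertex numbers:)

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Suppose vertices 2 and 5 carry no token at the start, and the desired
  // mapping has empty target slots 7 and 9.
  std::vector<std::size_t> empty_sources{2, 5};  // already sorted
  const std::vector<std::size_t> empty_targets{7, 9};

  // Enumerate every assignment empty_sources[i] -> empty_targets[i].
  do {
    for (std::size_t i = 0; i < empty_sources.size(); ++i) {
      std::cout << empty_sources[i] << "->" << empty_targets[i] << "  ";
    }
    std::cout << '\n';
  } while (std::next_permutation(empty_sources.begin(), empty_sources.end()));
  // Prints:  2->7  5->9
  //          5->7  2->9
  return 0;
}

(Unlike this sketch, the member function above advances past the first, sorted arrangement before looping, since that arrangement reproduces desired_mapping itself, which the plain exact lookup has already tried.)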
+ { + const bool next_permutation = std::next_permutation( + m_empty_source_vertices.begin(), m_empty_source_vertices.end()); + TKET_ASSERT(next_permutation); + } + m_altered_mapping = desired_mapping; + + for (unsigned perm_count = 0;;) { + for (unsigned ii = 0; ii < m_empty_source_vertices.size(); ++ii) { + m_altered_mapping[m_empty_source_vertices[ii]] = + m_empty_target_vertices[ii]; + } + const auto& exact_map_result_for_permuted_vertices = + m_exact_mapping_lookup.improve_upon_existing_result( + m_altered_mapping, edges, max_number_of_swaps); + + if (exact_map_result_for_permuted_vertices.success && + exact_map_result_for_permuted_vertices.swaps.empty()) { + return exact_map_result_for_permuted_vertices; + } + ++perm_count; + if (perm_count >= m_parameters.max_number_of_empty_vertex_permutations || + !std::next_permutation( + m_empty_source_vertices.begin(), m_empty_source_vertices.end())) { + return exact_map_result_for_permuted_vertices; + } + } +} + +PartialMappingLookup::Parameters::Parameters() + : max_number_of_empty_vertex_permutations(10) {} + +PartialMappingLookup::Parameters& PartialMappingLookup::get_parameters() { + return m_parameters; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp new file mode 100644 index 0000000000..4fe2124468 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapConversion.cpp @@ -0,0 +1,90 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "TokenSwapping/SwapConversion.hpp" + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +static vector get_swaps_fixed_vector() { + vector swaps; + for (unsigned ii = 0; ii < 6; ++ii) { + for (unsigned jj = ii + 1; jj < 6; ++jj) { + swaps.push_back(get_swap(ii, jj)); + } + } + TKET_ASSERT(swaps.size() == 15); + return swaps; +} + +static const vector& get_swaps_global() { + static const auto swaps_vect(get_swaps_fixed_vector()); + return swaps_vect; +} + +const Swap& SwapConversion::get_swap_from_hash(SwapHash x) { + TKET_ASSERT(x >= 1 && x <= 15); + return get_swaps_global().at(x - 1); +} + +static std::map get_swap_to_hash() { + const auto swaps = get_swaps_fixed_vector(); + std::map map; + for (unsigned ii = 0; ii < swaps.size(); ++ii) { + map[swaps[ii]] = ii + 1; + } + return map; +} + +static const std::map& +get_swap_to_hash_global() { + static const auto map(get_swap_to_hash()); + return map; +} + +SwapConversion::SwapHash SwapConversion::get_hash_from_swap(const Swap& swap) { + return get_swap_to_hash_global().at(swap); +} + +unsigned SwapConversion::get_number_of_swaps( + SwapConversion::SwapHash swaps_code) { + unsigned num_swaps = 0; + while (swaps_code != 0) { + ++num_swaps; + const auto swap_hash = swaps_code & 0xF; + swaps_code >>= 4; + TKET_ASSERT(swap_hash > 0); + TKET_ASSERT(swap_hash <= 15); + } + return num_swaps; +} + +SwapConversion::EdgesBitset SwapConversion::get_edges_bitset( + SwapHash swaps_code) { + EdgesBitset edges_bitset = 0; + while (swaps_code != 0) { + const auto swap_hash = swaps_code & 0xF; + TKET_ASSERT(swap_hash > 0); + edges_bitset |= (1u << (swap_hash - 1)); + swaps_code >>= 4; + } + return edges_bitset; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp new file mode 100644 index 0000000000..3c72369ce4 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapListSegmentOptimiser.cpp @@ -0,0 +1,181 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/SwapListSegmentOptimiser.hpp" + +#include +#include +#include + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +const SwapListSegmentOptimiser::Output& +SwapListSegmentOptimiser::optimise_segment( + SwapID initial_id, const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list) { + m_best_optimised_swaps.clear(); + + // Nonzero if and only if a valid sequence of swaps was stored. + m_output.initial_segment_size = 0; + + // If the mapping has too many vertices, it MAY happen that + // adding more swaps REDUCES the number of vertices + // (since, some vertices may move back to their original positions, + // and hence be "ignored"). Thus, we ALLOW the lookup to fail a few times + // due to too many vertices before we give up. 
+ const int max_consecutive_too_many_vertices = 5; + int too_many_vertices_count = max_consecutive_too_many_vertices; + + VertexMapping current_map; + { + const auto& initial_swap = swap_list.at(initial_id); + current_map[initial_swap.first] = initial_swap.second; + current_map[initial_swap.second] = initial_swap.first; + } + size_t current_number_of_swaps = 1; + VertexMapping current_map_copy; + for (auto next_id_opt = swap_list.next(initial_id);;) { + bool too_many_vertices = false; + + // As we keep adding swaps to a sequence and updating the resultant + // target->source vertex mapping, should we look up EVERY mapping in the + // table, or is it enough to do so only when the map increases in size, etc. + // etc.? Desperately need some theory here! We look up almost EVERYTHING, so + // table lookup is one possible slowdown; reducing unnecessary lookups is + // worthwhile. + // TODO: think of some theory, and experiment! + bool attempt_to_optimise = current_map.size() >= 3; + if (!attempt_to_optimise && !next_id_opt) { + // Because it's the FINAL segment, optimise it whatever we do. + attempt_to_optimise = true; + } + if (attempt_to_optimise) { + // We're going to attempt to optimise. + current_map_copy = current_map; + const auto& resize_result = map_resizing.resize_mapping(current_map); + if (resize_result.success) { + const auto& lookup_result = m_mapping_lookup( + current_map, resize_result.edges, vertices_with_tokens_at_start, + current_number_of_swaps); + + if (lookup_result.success) { + // We've got a new result from the table; do we store it? + bool should_store = m_output.initial_segment_size == 0; + if (!should_store) { + // Something IS stored, but is our new solution better? + // GCOVR_EXCL_START + TKET_ASSERT( + m_output.initial_segment_size >= m_best_optimised_swaps.size()); + // GCOVR_EXCL_STOP + const size_t current_decrease = + m_output.initial_segment_size - m_best_optimised_swaps.size(); + TKET_ASSERT(current_number_of_swaps >= lookup_result.swaps.size()); + const size_t new_decrease = + current_number_of_swaps - lookup_result.swaps.size(); + should_store = new_decrease > current_decrease; + } + if (should_store) { + m_output.initial_segment_size = current_number_of_swaps; + m_best_optimised_swaps = lookup_result.swaps; + } + } else { + if (lookup_result.too_many_vertices) { + too_many_vertices = true; + } + } + } else { + // We couldn't resize the mapping, so there must be too many vertices. + too_many_vertices = true; + // Also, the vertex mapping may be corrupted, so restore it + current_map = current_map_copy; + } + } + + if (too_many_vertices) { + --too_many_vertices_count; + if (too_many_vertices_count <= 0) { + break; + } + } else { + too_many_vertices_count = max_consecutive_too_many_vertices; + } + + // Now add a swap. + if (next_id_opt) { + const auto id = next_id_opt.value(); + const Swap swap = swap_list.at(id); + add_swap(current_map, swap); + ++current_number_of_swaps; + next_id_opt = swap_list.next(id); + } else { + // We've reached the end! + break; + } + } + fill_final_output_and_swaplist(initial_id, swap_list); + return m_output; +} + +void SwapListSegmentOptimiser::fill_final_output_and_swaplist( + SwapID initial_id, SwapList& swap_list) { + if (m_output.initial_segment_size == 0) { + // No improvement was found. 
+ m_output.final_segment_size = 0; + m_output.new_segment_last_id = {}; + return; + } + m_output.final_segment_size = m_best_optimised_swaps.size(); + TKET_ASSERT(m_output.final_segment_size <= m_output.initial_segment_size); + const auto initial_size = swap_list.size(); + + if (m_best_optimised_swaps.empty()) { + swap_list.erase_interval(initial_id, m_output.initial_segment_size); + m_output.new_segment_last_id = {}; + } else { + const auto overwrite_result = swap_list.overwrite_interval( + initial_id, m_best_optimised_swaps.cbegin(), + m_best_optimised_swaps.cend()); + + // GCOVR_EXCL_START + TKET_ASSERT( + overwrite_result.number_of_overwritten_elements == + m_best_optimised_swaps.size()); + // GCOVR_EXCL_STOP + m_output.new_segment_last_id = + overwrite_result.final_overwritten_element_id; + + const size_t remaining_elements_to_erase = + m_output.initial_segment_size - m_output.final_segment_size; + + const auto next_id_opt = + swap_list.next(overwrite_result.final_overwritten_element_id); + if (next_id_opt) { + swap_list.erase_interval( + next_id_opt.value(), remaining_elements_to_erase); + } + } + // GCOVR_EXCL_START + TKET_ASSERT( + swap_list.size() + m_output.initial_segment_size == + initial_size + m_output.final_segment_size); + // GCOVR_EXCL_STOP +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp new file mode 100644 index 0000000000..a88a4a7e22 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapListTableOptimiser.cpp @@ -0,0 +1,234 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/SwapListTableOptimiser.hpp" + +#include +#include +#include + +#include "Utils/Assert.hpp" + +namespace tket { +namespace tsa_internal { + +enum class EmptySwapCheckResult { + NOT_EMPTY, + CONTINUE_AFTER_ERASURE, + TERMINATE_AFTER_ERASURE +}; + +// current_id is KNOWN to be valid. +// vertices_with_tokens is correct just BEFORE performing the swap. +// If the swap is empty, erase it and update current_id (to the next swap). +static EmptySwapCheckResult check_for_empty_swap( + const std::set& vertices_with_tokens, SwapID& current_id, + SwapList& swap_list) { + const auto swap = swap_list.at(current_id); + if (vertices_with_tokens.count(swap.first) != 0 || + vertices_with_tokens.count(swap.second) != 0) { + return EmptySwapCheckResult::NOT_EMPTY; + } + const auto next_id_opt = swap_list.next(current_id); + swap_list.erase(current_id); + if (!next_id_opt) { + return EmptySwapCheckResult::TERMINATE_AFTER_ERASURE; + } + current_id = next_id_opt.value(); + return EmptySwapCheckResult::CONTINUE_AFTER_ERASURE; +} + +// current_id is KNOWN to be valid. +// vertices_with_tokens is correct just BEFORE performing the swap. +// Keep erasing empty swaps and updating current_id +// until EITHER we hit a nonempty swap, OR we run out of swaps, +// and thus return false. 
+static bool erase_empty_swaps_interval( + const std::set& vertices_with_tokens, SwapID& current_id, + SwapList& swap_list) { + for (auto infinite_loop_guard = 1 + swap_list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + switch (check_for_empty_swap(vertices_with_tokens, current_id, swap_list)) { + case EmptySwapCheckResult::CONTINUE_AFTER_ERASURE: + // Maybe more to erase! + break; + case EmptySwapCheckResult::NOT_EMPTY: + return true; + case EmptySwapCheckResult::TERMINATE_AFTER_ERASURE: + return false; + default: + TKET_ASSERT(!"unknown EmptySwapCheckResult enum"); + break; + } + } + // Should never get here! + TKET_ASSERT(!"erase_empty_swaps_interval failed to terminate"); + return false; +} + +// current_id is KNOWN to be valid and nonempty. +// vertices_with_tokens is correct just BEFORE we perform the current swap. +// Perform the swap (i.e., updating vertices_with_tokens), +// and advance current_id to the next swap. +static bool perform_current_nonempty_swap( + std::set& vertices_with_tokens, SwapID& current_id, + const SwapList& swap_list) { + const auto swap = swap_list.at(current_id); + + if (vertices_with_tokens.count(swap.first) == 0) { + // No empty swaps! + TKET_ASSERT(vertices_with_tokens.count(swap.second) != 0); + // Second has a token, first doesn't. + TKET_ASSERT(vertices_with_tokens.insert(swap.first).second); + TKET_ASSERT(vertices_with_tokens.erase(swap.second) == 1); + } else { + // First has a token. + if (vertices_with_tokens.count(swap.second) == 0) { + // Second has no token. + TKET_ASSERT(vertices_with_tokens.erase(swap.first) == 1); + TKET_ASSERT(vertices_with_tokens.insert(swap.second).second); + } + } + + const auto next_id_opt = swap_list.next(current_id); + if (!next_id_opt) { + return false; + } + current_id = next_id_opt.value(); + return true; +} + +void SwapListTableOptimiser::optimise( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser) { + if (vertices_with_tokens_at_start.empty()) { + swap_list.clear(); + return; + } + if (swap_list.empty()) { + return; + } + + // Because we'll go in both directions, we need to know + // which tokens exist at the END of the mapping. + auto vertices_with_tokens_at_end = vertices_with_tokens_at_start; + { + // Already checked to be nonempty. + auto current_id = swap_list.front_id().value(); + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + swap_list.size(); + infinite_loop_guard > 0; --infinite_loop_guard) { + if (!erase_empty_swaps_interval( + vertices_with_tokens_at_end, current_id, swap_list)) { + terminated_correctly = true; + break; + } + if (!perform_current_nonempty_swap( + vertices_with_tokens_at_end, current_id, swap_list)) { + terminated_correctly = true; + break; + } + } + TKET_ASSERT(terminated_correctly); + if (swap_list.size() <= 1) { + return; + } + } + // Now begin the forward/backward loop. + for (auto infinite_loop_guard = 1 + swap_list.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = swap_list.size(); + optimise_in_forward_direction( + vertices_with_tokens_at_start, map_resizing, swap_list, + swap_list_optimiser); + + swap_list.reverse(); + optimise_in_forward_direction( + vertices_with_tokens_at_end, map_resizing, swap_list, + swap_list_optimiser); + + // Must reverse again to get back to start! 
+ swap_list.reverse(); + const auto new_size = swap_list.size(); + TKET_ASSERT(new_size <= old_size); + if (new_size == old_size) { + return; + } + } + TKET_ASSERT(!"SwapListTableOptimiser::optimise"); +} + +void SwapListTableOptimiser::optimise_in_forward_direction( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser) { + swap_list_optimiser.optimise_pass_with_frontward_travel(swap_list); + + m_segment_optimiser.optimise_segment( + swap_list.front_id().value(), vertices_with_tokens_at_start, map_resizing, + swap_list); + + if (swap_list.size() <= 1) { + return; + } + // Will always remain valid. We perform this swap and then optimise + // starting from the next one. + auto current_id = swap_list.front_id().value(); + auto vertices_with_tokens = vertices_with_tokens_at_start; + + for (size_t infinite_loop_guard = swap_list.size(); infinite_loop_guard != 0; + --infinite_loop_guard) { + if (!erase_empty_swaps_interval( + vertices_with_tokens, current_id, swap_list)) { + return; + } + // We now have a valid ID with nonempty swap. + if (!perform_current_nonempty_swap( + vertices_with_tokens, current_id, swap_list)) { + return; + } + + // NOW we want to optimise from this ID. + // However, we must be careful; maybe it will be erased, so we have + // to get the PREVIOUS and recover from there. + const auto previous_id_opt = swap_list.previous(current_id); + + m_segment_optimiser.optimise_segment( + current_id, vertices_with_tokens, map_resizing, swap_list); + + // We now want to set "current_id" to the first swap of + // the newly optimised segment (if any) - which may of course + // be unchanged, changed, or empty. + + // If there was no previous ID, we must have been at the front + // just before we optimised. + auto current_id_opt = swap_list.front_id(); + if (previous_id_opt) { + // There WAS a previous ID, so we CAN move onto the next. + current_id_opt = swap_list.next(previous_id_opt.value()); + } + if (!current_id_opt) { + return; + } + current_id = current_id_opt.value(); + } +} + +SwapListSegmentOptimiser& SwapListTableOptimiser::get_segment_optimiser() { + return m_segment_optimiser; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp new file mode 100644 index 0000000000..3687a546c1 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/SwapSequenceTable.cpp @@ -0,0 +1,1422 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "TokenSwapping/SwapSequenceTable.hpp" + +namespace tket { +namespace tsa_internal { + +SwapSequenceTable::Table SwapSequenceTable::get_table() { + Table map; + + // clang-format off + map[2] = { + 0x1, 0x262, 0x373, 0x484, 0x595, 0x27a72, 0x28b82, 0x29c92, + 0x36a63, 0x38d83, 0x39e93, 0x46b64, 0x47d74, 0x49f94, 0x56c65, 0x57e75, + 0x58f85, 0x27dbd72, 0x27ece72, 0x28dad82, 0x28fcf82, 0x29eae92, + 0x29fbf92, 0x36bdb63, 0x36cec63, 0x38bab83, 0x38fef83, 0x39cac93, + 0x39fdf93, 0x46ada64, 0x46cfc64, 0x47aba74, 0x47efe74, 0x49cbc94, + 0x49ede94, 0x56aea65, 0x56bfb65, 0x57aca75, 0x57dfd75, 0x58bcb85, + 0x58ded85, 0x2a8fea2f8, 0x2a9fda2f9, 0x2b7efb2e7, 0x2b9edb2e9, + 0x2c7dfc2d7, 0x2c8dec2d8, 0x3a8fca3f8, 0x3a9fba3f9, 0x3d6cfd3c6, + 0x3d9cbd3c9, 0x3e6bfe3b6, 0x3e8bce3b8, 0x4b7ecb4e7, 0x4b9eab4e9, + 0x4d6ced4c6, 0x4d9cad4c9, 0x4f6aef4a6, 0x4f7acf4a7, 0x5c7dbc5d7, + 0x5c8dac5d8, 0x5e6bde5b6, 0x5e8bae5b8, 0x5f6adf5a6, 0x5f7abf5a7, + }; + + map[3] = { + 0x16, 0x21, 0x62, 0x17a7, 0x18b8, 0x19c9, 0x2373, 0x2484, 0x2595, + 0x36a3, 0x3736, 0x3a31, 0x3a73, 0x46b4, 0x4846, 0x4b41, 0x4b84, 0x56c5, + 0x5956, 0x5c51, 0x5c95, 0x7a27, 0x8b28, 0x9c29, 0x17bd7b, + 0x17ce7c, 0x18ad8a, 0x18cf8c, 0x19ae9a, 0x19bf9b, 0x238d38, 0x239e39, + 0x247d47, 0x249f49, 0x257e57, 0x258f58, 0x36bd3b, 0x36ce3c, 0x3738b8, + 0x3739c9, 0x38d386, 0x38db83, 0x39e396, 0x39ec93, 0x3a3484, 0x3a3595, + 0x3ad8d3, 0x3ae9e3, 0x3b8ba3, 0x3bdb73, 0x3c9ca3, 0x3cec73, 0x3dbd31, + 0x3ece31, 0x46ad4a, 0x46cf4c, 0x47d476, 0x47da74, 0x4847a7, 0x4849c9, + 0x49f496, 0x49fc94, 0x4a7ab4, 0x4ada84, 0x4b4373, 0x4b4595, 0x4bd7d4, + 0x4bf9f4, 0x4c9cb4, 0x4cfc84, 0x4dad41, 0x4fcf41, 0x56ae5a, 0x56bf5b, + 0x57e576, 0x57ea75, 0x58f586, 0x58fb85, 0x5957a7, 0x5958b8, 0x5a7ac5, + 0x5aea95, 0x5b8bc5, 0x5bfb95, 0x5c5373, 0x5c5484, 0x5ce7e5, 0x5cf8f5, + 0x5eae51, 0x5fbf51, 0x7bd7b2, 0x7ce7c2, 0x8ad8a2, 0x8cf8c2, 0x9ae9a2, + 0x9bf9b2, 0x17bfe7fb, 0x17cfd7fc, 0x18aef8ea, 0x18ced8ec, 0x19adf9da, + 0x19bde9db, 0x238fe3f8, 0x239fd3f9, 0x247ef4e7, 0x249ed4e9, 0x257df5d7, + 0x258de5d8, 0x3738fcf8, 0x3739fbf9, 0x38dfcf83, 0x38fca3f8, 0x38fe3cf8, + 0x39efbf93, 0x39fba3f9, 0x39fd3bf9, 0x3a349f94, 0x3a358f85, 0x3ad9f9d3, + 0x3ae8f8e3, 0x3b8bcec3, 0x3b8fefb3, 0x3bdb3595, 0x3bdbe9e3, 0x3bfe31fb, + 0x3bfefb73, 0x3c9cbdb3, 0x3c9fdfc3, 0x3cec3484, 0x3cecd8d3, 0x3cfd31fc, + 0x3cfdfc73, 0x3d6cfcd3, 0x3d89c9d3, 0x3d9f96d3, 0x3e6bfbe3, 0x3e8f86e3, + 0x3e98b8e3, 0x47dece74, 0x47ecb4e7, 0x47ef4ce7, 0x4847ece7, 0x4849eae9, + 0x49eab4e9, 0x49ed4ae9, 0x49feae94, 0x4a7acfc4, 0x4a7efea4, 0x4ada4595, + 0x4adaf9f4, 0x4aef41ea, 0x4aefea84, 0x4b439e93, 0x4b457e75, 0x4bd9e9d4, + 0x4bf7e7f4, 0x4c9cada4, 0x4c9edec4, 0x4ced41ec, 0x4cedec84, 0x4cfc4373, + 0x4cfcd7d4, 0x4d6cecd4, 0x4d79c9d4, 0x4d9e96d4, 0x4f6aeaf4, 0x4f7e76f4, + 0x4f97a7f4, 0x57dbc5d7, 0x57df5bd7, 0x57edbd75, 0x58dac5d8, 0x58de5ad8, + 0x58fdad85, 0x5957dbd7, 0x5958dad8, 0x5a7abfb5, 0x5a7dfda5, 0x5adf51da, + 0x5adfda95, 0x5aea5484, 0x5aeaf8f5, 0x5b8baea5, 0x5b8dedb5, 0x5bde51db, + 0x5bdedb95, 0x5bfb5373, 0x5bfbe7e5, 0x5c538d83, 0x5c547d74, 0x5ce8d8e5, + 0x5cf7d7f5, 0x5e6bdbe5, 0x5e78b8e5, 0x5e8d86e5, 0x5f6adaf5, 0x5f7d76f5, + 0x5f87a7f5, 0x7dcfc2d7, 0x7ebfb2e7, 0x8dcec2d8, 0x8faea2f8, 0x9ebdb2e9, + 0x9fada2f9, + }; + + map[4] = { + 0x16a, 0x176, 0x1a7, 0x21a, 0x316, 0x321, 0x362, 0x62a, 0x6a3, 0x736, + 0xa31, 0xa73, 0x12712, 0x16bdb, 0x16cec, 0x178b8, 0x179c9, 0x18ad8, + 0x18b8a, 0x18d86, 0x18db8, 0x19ae9, 0x19c9a, 0x19e96, 0x19ec9, 0x1bd7b, + 0x1ce7c, 0x21bdb, 0x21cec, 0x23273, 0x2484a, 0x2595a, 0x26276, 0x318b8, + 0x319c9, 
0x32484, 0x32595, 0x346b4, 0x34846, 0x34b41, 0x34b84, 0x356c5, + 0x35956, 0x35c51, 0x35c95, 0x38b28, 0x39c29, 0x46ad4, 0x46ba4, 0x47d46, + 0x4846a, 0x48476, 0x484a7, 0x4a7d4, 0x4ad41, 0x4ad84, 0x4b41a, 0x4ba84, + 0x4d416, 0x4d421, 0x4d462, 0x4d6b4, 0x4d846, 0x4db41, 0x4db84, 0x56ae5, + 0x56ca5, 0x57e56, 0x5956a, 0x59576, 0x595a7, 0x5a7e5, 0x5ae51, 0x5ae95, + 0x5c51a, 0x5ca95, 0x5e516, 0x5e521, 0x5e562, 0x5e6c5, 0x5e956, 0x5ec51, + 0x5ec95, 0x62bdb, 0x62cec, 0x6bdb3, 0x6cec3, 0x738b8, 0x739c9, 0x7a27a, + 0x8a348, 0x8ad38, 0x8b82a, 0x8b8a3, 0x8d386, 0x8d3b8, 0x9a359, 0x9ae39, + 0x9c92a, 0x9c9a3, 0x9e396, 0x9e3c9, 0xbdb31, 0xbdb73, 0xcec31, 0xcec73, + 0x128d812, 0x129e912, 0x16bfefb, 0x16cfdfc, 0x1714b41, 0x1715c51, + 0x178cfc8, 0x179bfb9, 0x18aef8e, 0x18b8cec, 0x18ced8c, 0x18cfca8, + 0x18cfec8, 0x18d8c9c, 0x18dcfc8, 0x18efb8e, 0x18efe86, 0x19adf9d, + 0x19bde9b, 0x19bfba9, 0x19bfdb9, 0x19c9bdb, 0x19dfc9d, 0x19dfd96, + 0x19e9b8b, 0x19ebfb9, 0x1bfe7fb, 0x1cfd7fc, 0x21befbe, 0x21cdfcd, + 0x232d8d3, 0x232e9e3, 0x2484cec, 0x2595bdb, 0x2628d86, 0x2629e96, + 0x28b2db8, 0x29c2ec9, 0x2a49f49, 0x2a58f58, 0x2d4284d, 0x2e5295e, + 0x318fcf8, 0x319fbf9, 0x3249f94, 0x3258f85, 0x346cf4c, 0x34849c9, + 0x349cb49, 0x349f964, 0x349fc94, 0x34b4373, 0x34b4959, 0x34b9f94, + 0x34cf84c, 0x34cfc41, 0x356bf5b, 0x358bc58, 0x358f865, 0x358fb85, + 0x35958b8, 0x35bf95b, 0x35bfb51, 0x35c5373, 0x35c5848, 0x35c8f85, + 0x38fc2f8, 0x39fb2f9, 0x46bcec4, 0x46ced4c, 0x46cf4ec, 0x46cfca4, + 0x4787b84, 0x47d4c9c, 0x47ef4e6, 0x4846cec, 0x48479c9, 0x4849e96, + 0x4849ec9, 0x484ae9e, 0x484c9ca, 0x484cec7, 0x49cad49, 0x49ed496, + 0x49f4976, 0x49f4e96, 0x49f4ec9, 0x49fca49, 0x4a7ef4e, 0x4ab7ab4, + 0x4ad4595, 0x4ad9f49, 0x4ae9ed4, 0x4ae9f4e, 0x4aefe41, 0x4aefe84, + 0x4b41cec, 0x4b4595a, 0x4b67b64, 0x4baf9f4, 0x4bd7bd4, 0x4c9cba4, + 0x4c9ed4c, 0x4ced41c, 0x4ced84c, 0x4cf41ca, 0x4cf41ec, 0x4cf84ca, + 0x4d419c9, 0x4d42595, 0x4d456c5, 0x4d45956, 0x4d45c95, 0x4d4c9c2, + 0x4d849c9, 0x4db4595, 0x4dcfc84, 0x4df6cf4, 0x4df96f4, 0x4dfc9f4, + 0x4ece7d4, 0x4ef421e, 0x4ef84e6, 0x4efb41e, 0x4efb84e, 0x4efe6b4, + 0x4f6aef4, 0x4f9f46a, 0x4f9f4a7, 0x4fecf84, 0x4fef416, 0x4fef462, + 0x56bde5b, 0x56bf5db, 0x56bfba5, 0x56cbdb5, 0x5797c95, 0x57df5d6, + 0x57e5b8b, 0x58bae58, 0x58de586, 0x58f5876, 0x58f5d86, 0x58f5db8, + 0x58fba58, 0x5956bdb, 0x59578b8, 0x5958d86, 0x5958db8, 0x595ad8d, + 0x595b8ba, 0x595bdb7, 0x5a7df5d, 0x5ac7ac5, 0x5ad8de5, 0x5ad8f5d, + 0x5adfd51, 0x5adfd95, 0x5ae5484, 0x5ae8f58, 0x5b8bca5, 0x5b8de5b, + 0x5bde51b, 0x5bde95b, 0x5bf51ba, 0x5bf51db, 0x5bf95ba, 0x5c51bdb, + 0x5c5484a, 0x5c67c65, 0x5caf8f5, 0x5ce7ce5, 0x5dbd7e5, 0x5df521d, + 0x5df95d6, 0x5dfc51d, 0x5dfc95d, 0x5dfd6c5, 0x5e518b8, 0x5e52484, + 0x5e546b4, 0x5e54846, 0x5e54b84, 0x5e5b8b2, 0x5e958b8, 0x5ebfb95, + 0x5ec5484, 0x5ef6bf5, 0x5ef86f5, 0x5efb8f5, 0x5f6adf5, 0x5f8f56a, + 0x5f8f5a7, 0x5fdbf95, 0x5fdf516, 0x5fdf562, 0x62befeb, 0x62cdfdc, + 0x6befe3b, 0x6cdfd3c, 0x738cf8c, 0x739bf9b, 0x84b8cec, 0x85ecf85, + 0x8aefe38, 0x8b2cec8, 0x8b8c5ec, 0x8b8ece3, 0x8ce348c, 0x8cf8c2a, + 0x8cfe38c, 0x8d389c9, 0x8d3fcf8, 0x8da2da8, 0x8eced38, 0x8ef86e3, + 0x8efb8e3, 0x8fa35f8, 0x8fcf8a3, 0x94dbf94, 0x95c9bdb, 0x9adfd39, + 0x9bd359b, 0x9bf9b2a, 0x9bfd39b, 0x9c2bdb9, 0x9c9b4db, 0x9c9dbd3, + 0x9dbde39, 0x9df96d3, 0x9dfc9d3, 0x9e398b8, 0x9e3fbf9, 0x9ea2ea9, + 0x9fa34f9, 0x9fbf9a3, 0xb5e541b, 0xb5efb51, 0xbefbe73, 0xbfefb31, + 0xc4d451c, 0xc4dfc41, 0xcdfcd73, 0xcfdfc31, 0x128fef812, + 0x129fdf912, 0x1714cfc41, 0x1715bfb51, 0x18d8c515c, 0x19e9b414b, + 0x2328fe3f8, 0x2329fd3f9, 0x242d427d4, 0x252e527e5, 
0x2628fef86, + 0x2629fdf96, 0x28b2878b8, 0x28bfef2b8, 0x28fc2ecf8, 0x29c2979c9, + 0x29cfdf2c9, 0x29fb2dbf9, 0x2d4f9f24d, 0x2df5295fd, 0x2e5f8f25e, + 0x2ef4284fe, 0x34b49e3e9, 0x34cfc4373, 0x35bfb5373, 0x35c58d3d8, + 0x428427842, 0x46b469e96, 0x4787fcf84, 0x4849e98b8, 0x49f4befb9, + 0x4a7acfca4, 0x4abe9eab4, 0x4cb9cb4ec, 0x4cef7efc4, 0x4cf97f9c4, + 0x4cfdfc7d4, 0x4d45c5848, 0x4fc67c6f4, 0x529527952, 0x56c568d86, + 0x5797fbf95, 0x58f5cdfc8, 0x5958d89c9, 0x5a7abfba5, 0x5acd8dac5, + 0x5bc8bc5db, 0x5bdf7dfb5, 0x5bf87f8b5, 0x5bfefb7e5, 0x5e54b4959, + 0x5fb67b6f5, 0x7bd7b2bdb, 0x7ce7c2cec, 0x8edce5de8, 0x8fea2eaf8, + 0x9debd4ed9, 0x9fda2daf9, 0x242d42e9ed4, 0x252e52d8de5, + 0x2b28b29e98b, 0x2c29c28d89c, 0x428429e9842, 0x47ecb47e7ce, + 0x487845c5484, 0x49cb479c797, 0x4d45c54d7d4, 0x4fb9f479fbf, + 0x529528d8952, 0x57dbc57d7bd, 0x58bc578b787, 0x597954b4595, + 0x5e54b45e7e5, 0x5fc8f578fcf, 0x8ced82ce2c2, 0x8fdcf82cfdf, + 0x9bde92bd2b2, 0x9febf92bfef, 0xbf4efb7ef4f, 0xcf5dfc7df5f, + 0xdf85fd25f8f, 0xef94fe24f9f, + }; + + map[5] = { + 0x16ad, 0x16ba, 0x16db, 0x176d, 0x186a, 0x1876, 0x18a7, 0x1ad8, 0x1b8a, + 0x1d86, 0x1db8, 0x21ad, 0x21ba, 0x21db, 0x321d, 0x3d16, 0x3d62, 0x416a, + 0x4176, 0x41a7, 0x421a, 0x4316, 0x4321, 0x4362, 0x46a3, 0x4736, 0x4a31, + 0x4a73, 0x62ad, 0x62ba, 0x62db, 0x642a, 0x6a3d, 0x6ad4, 0x6b4a, 0x6d42, + 0x73d6, 0x76d4, 0x846a, 0x8476, 0x84a7, 0xa17d, 0xa31d, 0xa73d, 0xa7d4, + 0xad41, 0xad84, 0xb84a, 0xba41, 0xd416, 0xd421, 0xd6b4, 0xd846, 0xdb41, + 0xdb84, 0x12712d, 0x12812a, 0x12d812, 0x132813, 0x138136, 0x167b67, + 0x16aefe, 0x16bcec, 0x16cdfc, 0x16cecd, 0x16cfca, 0x16cfec, 0x16efbe, + 0x176efe, 0x1787b8, 0x179c9d, 0x17a7ba, 0x186cec, 0x187121, 0x187c9c, + 0x189ae9, 0x189c9a, 0x189ec9, 0x18a313, 0x18ce7c, 0x18e9e6, 0x19adf9, + 0x19ae9d, 0x19bf9a, 0x19c9ad, 0x19c9db, 0x19dbf9, 0x19df96, 0x19e96d, + 0x19f96a, 0x19f9a7, 0x19fae9, 0x19fca9, 0x19fe96, 0x19fec9, 0x1a7efe, + 0x1aef8e, 0x1b8cec, 0x1bd7bd, 0x1c9cba, 0x1c9dfc, 0x1c9ecd, 0x1cdf8c, + 0x1ce7dc, 0x1ced8c, 0x1cf8ec, 0x1d89c9, 0x1ef86e, 0x1efb8e, 0x1f976f, + 0x1fcf8a, 0x213b23, 0x21aefe, 0x21bcec, 0x21cdfc, 0x21cecd, 0x21cfca, + 0x21ecfe, 0x232473, 0x23273d, 0x237b23, 0x242a84, 0x243284, 0x2595ad, + 0x2595ba, 0x2595db, 0x26276d, 0x26286a, 0x262876, 0x262d86, 0x2d4284, + 0x316efe, 0x321efe, 0x327387, 0x32959d, 0x3436b4, 0x343846, 0x343b41, + 0x343b84, 0x356c5d, 0x35956d, 0x35c95d, 0x36a3ba, 0x373876, 0x373a87, + 0x3a31ba, 0x3a73ba, 0x3c51dc, 0x3d19c9, 0x3efe62, 0x412712, 0x416cec, + 0x4179c9, 0x419ae9, 0x419c9a, 0x419e96, 0x419ec9, 0x41ce7c, 0x421cec, + 0x42595a, 0x426276, 0x4319c9, 0x432959, 0x4356c5, 0x435956, 0x435c95, + 0x439c29, 0x43c5c1, 0x456ae5, 0x456c5a, 0x457e56, 0x45956a, 0x4595a7, + 0x45a7e5, 0x45ae51, 0x45c51a, 0x45e516, 0x45e562, 0x45e6c5, 0x45e965, + 0x45ec51, 0x45ec95, 0x462cec, 0x46ce3c, 0x4739c9, 0x47a27a, 0x48a348, + 0x495976, 0x495ae9, 0x495c9a, 0x49ae39, 0x49c2a9, 0x49ca39, 0x49e3c9, + 0x4a3595, 0x4ce31c, 0x4ce73c, 0x4e521e, 0x4e9e36, 0x56adf5, 0x56ae5d, + 0x56bf5a, 0x56c5ad, 0x56c5ba, 0x56c5db, 0x57df56, 0x57e56d, 0x58f56a, + 0x58f576, 0x58f5a7, 0x5956ad, 0x5956ba, 0x59576d, 0x59586a, 0x595876, + 0x5958a7, 0x595a7d, 0x595ad8, 0x595b8a, 0x595d6b, 0x595d86, 0x595db8, + 0x5a7df5, 0x5a7e5d, 0x5ad8f5, 0x5adf51, 0x5adf95, 0x5ae51d, 0x5ae95d, + 0x5b8f5a, 0x5bf51a, 0x5bf95a, 0x5c51ad, 0x5c51ba, 0x5c51db, 0x5c95ad, + 0x5c95ba, 0x5c95db, 0x5d6bf5, 0x5d8f56, 0x5db8f5, 0x5dbf95, 0x5df516, + 0x5df521, 0x5df562, 0x5df6c5, 0x5df956, 0x5dfc51, 0x5dfc95, 0x5e516d, + 0x5e521d, 0x5e956d, 0x5ed562, 0x5ed6c5, 0x5edc51, 
0x5edc95, 0x5f516a, + 0x5f5176, 0x5f51a7, 0x5f521a, 0x5f5316, 0x5f5362, 0x5f56a3, 0x5f5736, + 0x5f6ae5, 0x5f6c5a, 0x5f7e56, 0x5f956a, 0x5f9576, 0x5fa7e5, 0x5fae95, + 0x5fca95, 0x5fe516, 0x5fe521, 0x5fe562, 0x5fe6c5, 0x5fe956, 0x5fec51, + 0x5fec95, 0x623b23, 0x62aefe, 0x62b676, 0x62bece, 0x62cdfc, 0x62cecd, + 0x62cfec, 0x62efbe, 0x62fcfa, 0x65f52a, 0x67b674, 0x6a3efe, 0x6aef4e, + 0x6b4cec, 0x6bd3bd, 0x6cd45c, 0x6cdf4c, 0x6cecd4, 0x6cf4ca, 0x6ece3d, + 0x6ecf4e, 0x6ef42e, 0x6efb4e, 0x7387b8, 0x73d9c9, 0x73fef6, 0x76efe4, + 0x787b84, 0x79c9d4, 0x7a2b7a, 0x7a72ad, 0x7a7ba4, 0x846cec, 0x8479c9, + 0x849ae9, 0x849c9a, 0x849e96, 0x84c9ec, 0x87b82b, 0x8ad83d, 0x8b28ba, + 0x8b28db, 0x8d863d, 0x8db83d, 0x95f9a7, 0x9a359d, 0x9ad459, 0x9adf49, + 0x9ae3d9, 0x9ae9d4, 0x9b459a, 0x9bf49a, 0x9c2ad9, 0x9c2ba9, 0x9c92db, + 0x9c932d, 0x9c9a3d, 0x9c9ad4, 0x9c9ba4, 0x9c9d42, 0x9c9db4, 0x9d4259, + 0x9d4596, 0x9d45c9, 0x9db459, 0x9dbf49, 0x9df496, 0x9dfc94, 0x9e963d, + 0x9e96d4, 0x9ec93d, 0x9ec9d4, 0x9f496a, 0x9f4976, 0x9f49a7, 0x9f4ae9, + 0x9f4e96, 0x9f4ec9, 0x9fc9a4, 0xa31efe, 0xa5f531, 0xa73fef, 0xa75f53, + 0xa7efe4, 0xaef41e, 0xafe51f, 0xafef84, 0xb238b2, 0xb41cec, 0xb84cec, + 0xbd73bd, 0xbdb3d1, 0xbdb7d4, 0xc51fca, 0xcd419c, 0xcd451c, 0xcd89c4, + 0xcdf41c, 0xcdf84c, 0xce73dc, 0xce7d4c, 0xcec3d1, 0xcec874, 0xcecd41, + 0xcecd84, 0xcfca41, 0xcfca84, 0xcfec41, 0xcfec84, 0xdbf5f1, 0xefb41e, + 0xefb84e, 0xefe2b1, 0xefe416, 0xefe421, 0xefe846, 0xf5321f, + 0x12712efe, 0x12812cec, 0x129df912, 0x129e912d, 0x129f912a, 0x12fe912f, + 0x12fef812, 0x1329f913, 0x1361b613, 0x13813c9c, 0x139f9136, 0x16b6e96e, + 0x16cfc676, 0x17a7cfca, 0x1813c5c1, 0x1815ae51, 0x1815c51a, 0x1815e516, + 0x181c5ec1, 0x181e5e21, 0x181ece31, 0x18715c51, 0x189e9b8b, 0x18b318b3, + 0x18e9e121, 0x197f97c9, 0x19bde9bd, 0x19eabea9, 0x19f9a313, 0x19fbefb9, + 0x1c9cbece, 0x1cdfd7dc, 0x1ce7cfef, 0x1d815c51, 0x1f97121f, 0x213cfc23, + 0x217b2172, 0x21b2e52e, 0x2324e9e3, 0x2325f573, 0x2328d38d, 0x232e9e3d, + 0x237cfc23, 0x242af9f4, 0x242cec84, 0x2432f9f4, 0x245e5284, 0x24d427d4, + 0x25efb7e5, 0x26276efe, 0x26286232, 0x2629df96, 0x2629e96d, 0x262e96fe, + 0x262fef86, 0x26826ece, 0x26f9726f, 0x27842784, 0x27a27a87, 0x2953b239, + 0x29c2fec9, 0x29ced2c9, 0x2b25e562, 0x2b32e9e3, 0x2bc9579c, 0x2be5295e, + 0x2d42f9f4, 0x2e52495e, 0x2e5295ed, 0x2e5295fe, 0x2ef4284e, 0x2f52a95f, + 0x2f53295f, 0x2f9579fb, 0x2fc2632f, 0x2fcf6276, 0x32373fef, 0x325fe8f5, + 0x32739f97, 0x329e3fe9, 0x3436cf4c, 0x343849c9, 0x3439f4c9, 0x343b4373, + 0x343c9cb4, 0x343f9f46, 0x34b34959, 0x34cf834c, 0x35c57387, 0x35c5d737, + 0x36a3686a, 0x36a3cfca, 0x3739f976, 0x373a9f97, 0x373b67b6, 0x373ce87c, + 0x38ba38ba, 0x39ca3ba9, 0x39e3afe9, 0x39e3fe96, 0x39e3fec9, 0x3a31cfca, + 0x3a3595ba, 0x3a73cfca, 0x3abe9e3a, 0x3c53473c, 0x3c5c4384, 0x3ce31fec, + 0x3ce73fec, 0x3cfe8fc2, 0x3e9c289e, 0x3f53c95f, 0x3f59635f, 0x4171b41b, + 0x4171c51c, 0x419e9121, 0x42629e96, 0x434b9f94, 0x434cfc41, 0x457ac57a, + 0x45ae5484, 0x45c5484a, 0x45c67c65, 0x45e54864, 0x45e54b84, 0x45e56b4b, + 0x45e7ce75, 0x48ec548e, 0x48ece348, 0x49597c9c, 0x49aea2a9, 0x49c29ece, + 0x4a34f9f4, 0x4b5e541b, 0x53f53c51, 0x56c5686a, 0x56c56876, 0x56c5b676, + 0x56c5bcec, 0x578795b8, 0x579c795d, 0x579c8795, 0x57ac57ba, 0x57e57876, + 0x57e578a7, 0x57e587b8, 0x57eab7a5, 0x5878b8f5, 0x5898ae95, 0x5898c95a, + 0x5898ec95, 0x58bc578b, 0x59567b67, 0x5957a7ba, 0x595cd89c, 0x595d7bd7, + 0x5989e956, 0x598c9532, 0x5a7abf5a, 0x5a7acad5, 0x5ade8de5, 0x5ae51bab, + 0x5ae95aba, 0x5aefe8f5, 0x5b6bf576, 0x5b8bc5db, 0x5b8bcba5, 0x5bce8bc5, + 0x5bcebc51, 0x5bcebc95, 
0x5bdb7df5, 0x5bde95bd, 0x5c67c6d5, 0x5c6c5d86, + 0x5cb79c53, 0x5cbec7e5, 0x5cfca8f5, 0x5d8ded56, 0x5db8de5d, 0x5dbd7e5d, + 0x5de6bde5, 0x5debde51, 0x5df52959, 0x5dfcf8f5, 0x5e78ce75, 0x5ed7ce75, + 0x5f526276, 0x5f571721, 0x5f57a27a, 0x5f797c95, 0x5fa35935, 0x5fac7ac5, + 0x5fe6bf5b, 0x5feb8f5b, 0x5febfb95, 0x5fec7e57, 0x5fec8f58, 0x6268e9e6, + 0x6269f96a, 0x62b69e96, 0x63f5c35f, 0x65fc5676, 0x67cfc674, 0x6a5e5aba, + 0x6cfd3fdc, 0x6ece3fef, 0x717d5c51, 0x73787c9c, 0x7387fcf8, 0x73ec9bce, + 0x73f97c9f, 0x7871fcf8, 0x787fcf84, 0x79f53bf9, 0x7a27afef, 0x7a7cfca4, + 0x7bd7bd2b, 0x7cef47ec, 0x7f97c9f4, 0x849e98b8, 0x8795f259, 0x87cf85fc, + 0x89e9b82b, 0x8ad8a2ad, 0x8b28efbe, 0x8ced38dc, 0x8d3fcf8d, 0x8d89c93d, + 0x8e5f25e7, 0x8ef865fe, 0x8fce72cf, 0x92c2dfc9, 0x9ae9a2ad, 0x9ae9a2ba, + 0x9b2bc932, 0x9bce2bc9, 0x9bcebc94, 0x9bde9b3d, 0x9bf92b32, 0x9bf9b2ba, + 0x9bf9b2db, 0x9bfd3bd9, 0x9c2cfc9a, 0x9c2ec987, 0x9c9bdb3d, 0x9d3bd359, + 0x9dbded49, 0x9df963d3, 0x9dfc9d3d, 0x9e3bce98, 0x9e3febf9, 0x9eabea49, + 0x9f4befb9, 0x9fb2efb9, 0xa72cfca2, 0xad9f9d3d, 0xb6b49e96, 0xb82b5e52, + 0xb8b2bece, 0xb8fec3ef, 0xbc59835c, 0xbef51bfe, 0xc537f53c, 0xcdfcd73d, + 0xcdfcd7d4, 0xce7fec2b, 0xcf538f5c, 0xcfd3fd1c, 0xd45c5484, 0xe529589e, + 0xe98b598e, 0xef5bfe73, 0xf5395fb8, 0xf97c92cf, 0xfc239c2f, + 0x131813fcf8, 0x1361cfc613, 0x16b615e516, 0x17ce7bcebc, 0x1815e518b8, + 0x197c9bc979, 0x1bfe7bfebf, 0x2171cfc217, 0x2329fd3fd9, 0x24d42e9ed4, + 0x252f52a8f5, 0x252f5328f5, 0x254e5247e5, 0x25e7b25e25, 0x26239f9623, + 0x28429e9842, 0x2a27a29f97, 0x2b2129e912, 0x2c29c279cd, 0x2c29c2d89c, + 0x2e5f825f5e, 0x2ec982c9e2, 0x2f5e75e25f, 0x2f952795f2, 0x32e39e389e, + 0x3437fcf437, 0x353f536bf5, 0x353f538f56, 0x35c5d8d3d8, 0x37367cfc67, + 0x395fb35f93, 0x39ecb3ece9, 0x39fbafb3a9, 0x3a6a39f96a, 0x3ce98e93ec, + 0x3cfe38fec3, 0x3e3ce31bce, 0x3e3ce73bce, 0x3ea9e3a89e, 0x41714fcf41, + 0x419e914b41, 0x429c279c79, 0x434b439e39, 0x4529579525, 0x47ec27ec7e, + 0x513c51bc53, 0x529527952d, 0x529528952a, 0x52952d8952, 0x5295895325, + 0x52e527e587, 0x535f53bf51, 0x53bc538bc5, 0x53c537bc53, 0x579f97bf95, + 0x598a359353, 0x5ac7ac5878, 0x5ac8dac58d, 0x5bcb9c79c5, 0x5bfbefb7e5, + 0x5c6c586cec, 0x5dbc7dbc57, 0x5e6ae5868a, 0x5e7e5b6b76, 0x5e8bae58ba, + 0x5f517c515c, 0x626862e52e, 0x6befb3ef3e, 0x6cbec3ecbc, 0x7397f97bf9, + 0x7845c57848, 0x791f971bf9, 0x7975987259, 0x7e7ce7bce4, 0x7fec27ecf7, + 0x87f8cf82cf, 0x8c953895c8, 0x8efc2fc8fe, 0x8f8ef863ef, 0x8f8efb83ef, + 0x8fdcf8d2cf, 0x953c5bc593, 0x9596be969e, 0x968e963e98, 0x97c92c9879, + 0x97c97bc974, 0x97f97bf92b, 0x9895395896, 0x98e98b3e98, 0x9debde92b2, + 0x9eae92aefe, 0x9fadf92a2d, 0xa353f538f5, 0xae3fe8fe3e, 0xbc65cb635c, + 0xbcb9c2b79c, 0xbfbefb3ef1, 0xc2bec27ec2, 0xc2c98c92ca, 0xc2fc238fc2, + 0xc2fca8fc2c, 0xc5d45c7d74, 0xc9ca3fcacf, 0xcfdfc7df5f, 0xdf85f25f8f, + 0xe3febfe73e, 0xe52d7e5de7, 0xe545eb4595, 0xe7ec2ec7ed, 0xece5de8de5, + 0xf4b9f479fb, 0xf53b8f5bfb, 0xf919f319c9, 0xfbfe7febf4, 0xfef429f4f9, + 0x13191f913bf9, 0x1391c91bc913, 0x232e3fe38fe3, 0x24f4ef427ef4, + 0x2528f5278f52, 0x252e52de8de5, 0x252fdf527df5, 0x2a2ea9ea289e, + 0x2b2529527952, 0x2c29c2389c23, 0x2c2ec27ec287, 0x2f49f4279f42, + 0x3539538953b8, 0x353f537bf537, 0x37397c97bc97, 0x39ca3989ca98, + 0x3cafca38fca3, 0x3e39e369b69e, 0x3e3bce38bce3, 0x3e3c6ce386ce, + 0x4597954b4595, 0x4b454e54b7e5, 0x5395369b6953, 0x53c6c5386c53, + 0x5c51715bc517, 0x5f51715fbf51, 0x7d7fdcfd72cf, 0x7e7fe7bfe72b, + 0x8ced8c2cedc2, 0x8f8eaef82aef, + }; + + map[6] = { + 0x16adf, 0x16aed, 0x16afe, 0x16baf, 0x16cad, 0x16cba, 0x16cdb, 0x16dfc, + 0x16fec, 
0x176df, 0x176ed, 0x176fe, 0x186af, 0x18f76, 0x18fa7, 0x196ad, + 0x196ba, 0x196db, 0x197d6, 0x1986a, 0x19876, 0x198a7, 0x19ad8, 0x19b8a, + 0x19d86, 0x19db8, 0x1a7ed, 0x1a7fe, 0x1ad8f, 0x1adf9, 0x1ae9d, 0x1afe9, + 0x1b8fa, 0x1bf9a, 0x1c9ad, 0x1c9ba, 0x1c9db, 0x1d86f, 0x1db8f, 0x1dbf9, + 0x1df96, 0x1dfc9, 0x1e96d, 0x1ec9d, 0x1f96a, 0x1f976, 0x1f9a7, 0x1fc9a, + 0x1fe96, 0x1fec9, 0x21adf, 0x21aed, 0x21afe, 0x21baf, 0x21cad, 0x21cba, + 0x21cdb, 0x21dbf, 0x21dfc, 0x21ecd, 0x21fca, 0x21fec, 0x316df, 0x316fe, + 0x321df, 0x321ed, 0x321fe, 0x3df62, 0x3ed16, 0x3ed62, 0x41a7f, 0x421af, + 0x4362f, 0x4f16a, 0x4f176, 0x4f316, 0x4f321, 0x4f62a, 0x4f6a3, 0x4f736, + 0x4fa31, 0x4fa73, 0x516ad, 0x516ba, 0x516db, 0x5176d, 0x5186a, 0x51876, + 0x518a7, 0x51ad8, 0x51db8, 0x521ad, 0x521ba, 0x521db, 0x5316d, 0x53d21, + 0x53d62, 0x5416a, 0x54176, 0x541a7, 0x542a1, 0x54316, 0x54321, 0x54362, + 0x546a3, 0x54736, 0x54a31, 0x54a73, 0x562ba, 0x562db, 0x56a3d, 0x56ad4, + 0x56b4a, 0x56db4, 0x573d6, 0x57d46, 0x5846a, 0x58476, 0x584a7, 0x5a31d, + 0x5a73d, 0x5a7d4, 0x5ad41, 0x5ad84, 0x5b41a, 0x5b84a, 0x5d416, 0x5d421, + 0x5d462, 0x5d846, 0x5db41, 0x5db84, 0x62adf, 0x62aed, 0x62afe, 0x62bfa, + 0x62cad, 0x62cba, 0x62cdb, 0x62dfc, 0x62ecd, 0x62fca, 0x62fec, 0x632fe, + 0x652ad, 0x6542a, 0x6a3df, 0x6a3ed, 0x6a3fe, 0x6ad4f, 0x6adf5, 0x6ae5d, + 0x6af53, 0x6b4fa, 0x6baf5, 0x6c5ad, 0x6c5ba, 0x6c5db, 0x6d42f, 0x6db4f, + 0x6df52, 0x6dfc5, 0x6e52d, 0x6ec5d, 0x6f52a, 0x6f532, 0x6fca5, 0x6fe52, + 0x6fec5, 0x73fe6, 0x763df, 0x763ed, 0x76d4f, 0x76df5, 0x76ed5, 0x76f53, + 0x76fe5, 0x84f6a, 0x84f76, 0x84fa7, 0x86af5, 0x876f5, 0x8f5a7, 0x956ad, + 0x956ba, 0x956db, 0x9576d, 0x9586a, 0x95876, 0x958a7, 0x95a7d, 0x95ad8, + 0x95b8a, 0x95d86, 0x95db8, 0xa17df, 0xa197d, 0xa31df, 0xa31ed, 0xa31fe, + 0xa517d, 0xa73df, 0xa73ed, 0xa73fe, 0xa7d4f, 0xa7df5, 0xa7e5d, 0xa7f53, + 0xad41f, 0xad84f, 0xad8f5, 0xadf51, 0xadf95, 0xae51d, 0xae95d, 0xaf517, + 0xaf531, 0xaf975, 0xafe95, 0xb4f1a, 0xba518, 0xba84f, 0xba8f5, 0xbaf51, + 0xbaf95, 0xc51ad, 0xc51db, 0xc95ad, 0xc95ba, 0xc95db, 0xcba51, 0xd16bf, + 0xd416f, 0xd421f, 0xd5186, 0xd62bf, 0xd6bf5, 0xd864f, 0xd86f5, 0xdb41f, + 0xdb84f, 0xdb8f5, 0xdbf51, 0xdbf95, 0xdf516, 0xdf521, 0xdf956, 0xdfc51, + 0xdfc95, 0xe956d, 0xed16c, 0xed516, 0xed521, 0xedc51, 0xedc95, 0xf16ca, + 0xf516a, 0xf5176, 0xf521a, 0xf5316, 0xf5321, 0xf6ae5, 0xf956a, 0xf9576, + 0xfa7e5, 0xfae51, 0xfc51a, 0xfca95, 0xfe516, 0xfe521, 0xfe965, 0xfec51, + 0xfec95, 0x12712df, 0x12712ed, 0x12712fe, 0x12812af, 0x128132f, + 0x128712f, 0x12912ad, 0x12912ba, 0x129132d, 0x129712d, 0x12d812f, + 0x12d9812, 0x12df912, 0x12e912d, 0x12f912a, 0x12f9712, 0x12fe912, + 0x1329813, 0x132f913, 0x1343fc9, 0x138136f, 0x139163d, 0x1391643, + 0x1396b23, 0x1398136, 0x13f9136, 0x142a914, 0x146ca34, 0x149146a, + 0x1491476, 0x167b67f, 0x167c67d, 0x167fc67, 0x168c68a, 0x16abeab, + 0x16bdbed, 0x16cb676, 0x16cd868, 0x16efbef, 0x178be78, 0x1797dc9, + 0x17a7baf, 0x17a7cad, 0x17a7fca, 0x17e7876, 0x18789b8, 0x187b8cb, + 0x18d86ed, 0x18f78b8, 0x18fa313, 0x19121db, 0x1914321, 0x191ad41, + 0x191b41a, 0x191d416, 0x191db41, 0x197a7ba, 0x198121a, 0x1987121, + 0x19879c9, 0x1989ca9, 0x1989e96, 0x1989ec9, 0x198a313, 0x19a3143, + 0x19b67b6, 0x19bd7bd, 0x19cd89c, 0x19d4214, 0x1a7acba, 0x1a7e787, + 0x1a7eaba, 0x1ad8ede, 0x1aef8ef, 0x1b8acbc, 0x1b8bcdb, 0x1b8cbec, + 0x1b8dbed, 0x1bcb6ec, 0x1bcbec9, 0x1bd7bdf, 0x1bd7ebd, 0x1bde9bd, + 0x1c687c6, 0x1cdf8cf, 0x1ce7bce, 0x1ce7cfe, 0x1e78ce7, 0x1e7ce7d, + 0x1eae9ba, 0x1ecf8ef, 0x1ef86ef, 0x1efb8ef, 0x1efebf9, 0x1f8cf8a, + 0x1f91a31, 0x213b23f, 0x213cb23, 
0x213fc23, 0x214c24a, 0x21abeab, + 0x21bdbed, 0x21c232d, 0x21cebce, 0x21efbef, 0x2324f73, 0x2325473, + 0x23273ed, 0x232d8f5, 0x23723fe, 0x2372f53, 0x237c23d, 0x237c243, + 0x237cb23, 0x237fc23, 0x23c2187, 0x23d8fc2, 0x2432584, 0x243c284, + 0x24854ec, 0x248c24a, 0x252ad95, 0x252ba95, 0x253d295, 0x2542a95, + 0x2543295, 0x25925db, 0x259d425, 0x26276df, 0x26276ed, 0x26276fe, + 0x26286af, 0x262876f, 0x26296ad, 0x262976d, 0x2629876, 0x2629d86, + 0x262d86f, 0x262df96, 0x262e96d, 0x262f96a, 0x262f976, 0x262fe96, + 0x26926db, 0x28478fc, 0x2ae8748, 0x2bf3273, 0x2c14324, 0x2c23d62, + 0x2c24176, 0x2c24362, 0x2c26d42, 0x2c2d421, 0x2c62d86, 0x2c68132, + 0x2c87124, 0x2ca7387, 0x2ca8781, 0x2cd4284, 0x2d42584, 0x2d4284f, + 0x2df5295, 0x2e5295d, 0x2ec8478, 0x2f52a95, 0x2f53295, 0x2fc2362, + 0x2fc6276, 0x2fe5295, 0x3164e34, 0x3189eb8, 0x323573d, 0x3248e34, + 0x324f9e3, 0x327387f, 0x327397d, 0x3273987, 0x3273f97, 0x329589e, + 0x329e3d8, 0x32d373f, 0x32e3431, 0x32e3473, 0x32e7387, 0x34356b4, + 0x3435846, 0x3435b84, 0x3436b4f, 0x34384f6, 0x343b84f, 0x343bf95, + 0x347396b, 0x3486e34, 0x34b8e34, 0x34be341, 0x34bf9e3, 0x34bfe73, + 0x353d6c5, 0x353d956, 0x353dc51, 0x353dc95, 0x35436c5, 0x3543956, + 0x3543c95, 0x36a3baf, 0x36a3cad, 0x36a3cba, 0x36a3fca, 0x373876f, + 0x37396ba, 0x3739876, 0x373a87f, 0x373a987, 0x373af97, 0x373f976, + 0x3743bf9, 0x3796243, 0x3796b23, 0x389eb28, 0x3986235, 0x39e6b23, + 0x3a31baf, 0x3a31cad, 0x3a31cba, 0x3a31fca, 0x3a73baf, 0x3a73cba, + 0x3a73fca, 0x3b2f9e3, 0x3b8fe3a, 0x3d8b298, 0x3e3186a, 0x3e346a3, + 0x3e34a31, 0x3e436b4, 0x3e6ab3a, 0x3e76b23, 0x3ea31ba, 0x3eb2318, + 0x3eba8b2, 0x3f536c5, 0x3f53956, 0x3f53c51, 0x3f53c95, 0x417c9bc, + 0x42a8498, 0x42af484, 0x43198c9, 0x432484f, 0x4328498, 0x434b41f, + 0x435cf84, 0x436b4cb, 0x4384986, 0x4384b98, 0x4384fc9, 0x43b41cb, + 0x43b84cb, 0x43c9f4b, 0x4546ae5, 0x4546c5a, 0x4546ec5, 0x4547e56, + 0x454956a, 0x4549576, 0x45495a7, 0x454a7e5, 0x454ae51, 0x454ae95, + 0x454c51a, 0x454c95a, 0x454e516, 0x454e956, 0x454ec95, 0x45e4562, + 0x46ad4ed, 0x46b4cba, 0x46b4dcb, 0x473cb9c, 0x47d4ed6, 0x4846cad, + 0x4849876, 0x484ba98, 0x484db98, 0x486ca34, 0x48948a7, 0x4894ad8, + 0x489a348, 0x4a34584, 0x4a3484f, 0x4a35495, 0x4a7d4ed, 0x4ad41ed, + 0x4ad84ed, 0x4b84dcb, 0x4bcdb73, 0x4cb9ca3, 0x4d416ed, 0x4d421ed, + 0x4d864ed, 0x4db41ed, 0x4db84ed, 0x4de4d62, 0x4de6b4d, 0x4f12712, + 0x4f26276, 0x4f7a27a, 0x512712d, 0x512812a, 0x512d812, 0x5138136, + 0x5167b67, 0x517a7ba, 0x5186cec, 0x5187121, 0x518a313, 0x51bd7bd, + 0x5237b23, 0x5242a84, 0x526276d, 0x526286a, 0x5262d86, 0x52b2321, + 0x5327387, 0x53a31ba, 0x54179c9, 0x5426276, 0x5434b41, 0x543c5c1, + 0x545ec51, 0x5471271, 0x547a27a, 0x54e5e21, 0x5626876, 0x562b232, + 0x562b676, 0x56a3bab, 0x56bd3bd, 0x5736878, 0x57378a7, 0x57387b8, + 0x573a7ba, 0x57871b8, 0x5787b84, 0x57a27ad, 0x57a2b7a, 0x57ab47a, + 0x5813281, 0x58ad38d, 0x58b278b, 0x58b28db, 0x59a359d, 0x59ad459, + 0x59b459a, 0x59d4596, 0x59db459, 0x5b238b2, 0x5b28b2a, 0x5b6b476, + 0x5b8d3bd, 0x5bd31bd, 0x5bd73bd, 0x5bdb7d4, 0x5c9d45c, 0x5cd45c1, + 0x5cecd84, 0x5d38d36, 0x5d456c5, 0x623b23f, 0x624c24a, 0x626986a, + 0x62969ba, 0x62ae787, 0x62bdbed, 0x62c676d, 0x62c6876, 0x62c868a, + 0x62cb232, 0x62cb676, 0x62cbcec, 0x62eabea, 0x62efbef, 0x6324e34, + 0x672b67f, 0x67b674f, 0x67b67f5, 0x67c67d5, 0x67c6875, 0x67cb675, + 0x67f5267, 0x67fc675, 0x68c68a5, 0x69a3d89, 0x6abeab5, 0x6aef4ef, + 0x6b4cbec, 0x6bdb3df, 0x6bdbed5, 0x6c5bcec, 0x6c5d868, 0x6cbec3e, + 0x6cf4cfa, 0x6db3ed3, 0x6dfc4f4, 0x6ec3e43, 0x6ecd3e3, 0x6ece3fe, + 0x6ecf4ef, 0x6ef42ef, 0x6efb4ef, 0x6efbef5, 
0x73697d9, 0x73876cb, + 0x73879b8, 0x7387b8f, 0x7397dc9, 0x73987c9, 0x73ac687, 0x73cb679, + 0x73db8cb, 0x73dcbc6, 0x73e6878, 0x73e87b8, 0x73f97c9, 0x7634e34, + 0x768e785, 0x76efe4f, 0x78795b8, 0x787b84f, 0x787fc51, 0x78fc537, + 0x79c1943, 0x7a27adf, 0x7a27afe, 0x7a2cb7a, 0x7a2fc7a, 0x7a72baf, + 0x7a7ba4f, 0x7a7baf5, 0x7a7cad5, 0x7a7cba5, 0x7a7f52a, 0x7a7fca5, + 0x7c9bc53, 0x7d4cb9c, 0x7e587b8, 0x7f51721, 0x7f971c9, 0x7f97c95, + 0x846898a, 0x8478b98, 0x84798c9, 0x8489d86, 0x8498ae9, 0x8498c9a, + 0x8498e96, 0x8498ec9, 0x84bae78, 0x84bc78b, 0x84e786c, 0x84edce7, + 0x86c24ec, 0x8795248, 0x87b82bf, 0x87b82cb, 0x87b8cb5, 0x87f8b85, + 0x8981ae9, 0x898ae95, 0x898c95a, 0x898e521, 0x898ec95, 0x89b82ae, + 0x89e5248, 0x89e54b8, 0x8ad3e8d, 0x8ad83df, 0x8ad8ed5, 0x8b2a798, + 0x8b2ae78, 0x8b2c8ba, 0x8b82aed, 0x8b8cba5, 0x8b8d2cb, 0x8b8dcb5, + 0x8b98e52, 0x8d863df, 0x8d863ed, 0x8d86ed5, 0x8db83ed, 0x8db8ed5, + 0x8e75248, 0x8fce72c, 0x91491a7, 0x919a31d, 0x9567b67, 0x957a7ba, + 0x95879c9, 0x95bd7bd, 0x95c79cd, 0x95cd89c, 0x95e89e6, 0x97dc92c, + 0x98135c9, 0x9862c32, 0x987c92c, 0x98ae93e, 0x98c92ca, 0x98c9532, + 0x98c9e2c, 0x98e963e, 0x98ec93e, 0x9adf94f, 0x9ae39ed, 0x9ae9f3e, + 0x9bf94fa, 0x9c29cba, 0x9c29cdb, 0x9c29dfc, 0x9c29ecd, 0x9c29fec, + 0x9c2d89c, 0x9c9a3df, 0x9ca3d89, 0x9dbf49f, 0x9df964f, 0x9dfc94f, + 0x9e3c9fe, 0x9e3feb8, 0x9f49f6a, 0x9f49fa7, 0x9f4ae9f, 0x9f4c9fa, + 0x9f4ec9f, 0x9f9764f, 0x9fe964f, 0xa27a2ed, 0xa2c7a2d, 0xa2d8fc2, + 0xa348e34, 0xa3e3473, 0xa73797d, 0xa73cacd, 0xa73e787, 0xa73eaba, + 0xa78e785, 0xa7d4bcb, 0xa7ef4ef, 0xab7eab5, 0xabeab51, 0xabeab95, + 0xae349e3, 0xaef41ef, 0xaefe8f5, 0xaf53959, 0xafe84f8, 0xb236298, + 0xb238b2f, 0xb238cb2, 0xb41abcb, 0xb41cbec, 0xb84acbc, 0xb84cbec, + 0xb8a2bf2, 0xb8cb2ec, 0xb8cba3e, 0xb8cbec5, 0xb8d3acb, 0xb8fec3e, + 0xbc5318b, 0xbcb5316, 0xbcbec95, 0xbd73ebd, 0xbd7bdf5, 0xbdb3df1, + 0xbdb3ed1, 0xbdb7ed5, 0xbdbed51, 0xbdbed95, 0xc23d9c2, 0xc24179c, + 0xc2419ec, 0xc2439c2, 0xc249c2a, 0xc2ec7d4, 0xc51ebce, 0xc534b73, + 0xc92cd42, 0xc9ac2cd, 0xc9b2c32, 0xc9bce2c, 0xc9c2fca, 0xc9e3bce, + 0xc9ed4bc, 0xca37943, 0xcb2179c, 0xcbec3e1, 0xcbec73e, 0xcbec7e5, + 0xcdf84cf, 0xcdfc4f1, 0xcdfc8f5, 0xce2c417, 0xce73ced, 0xce7d4bc, + 0xce7db2c, 0xcec3ed1, 0xcec7d4f, 0xcecf3e1, 0xcecf73e, 0xcfca4f1, + 0xcfca84f, 0xcfca8f5, 0xcfec4f1, 0xcfec84f, 0xd3b8d3f, 0xd3bdf73, + 0xd3bf9e3, 0xd428498, 0xd4b41cb, 0xd7bd74f, 0xd7fc537, 0xd8498c9, + 0xdb28b2f, 0xe3186ce, 0xe34c9e3, 0xe34ce31, 0xe34ce73, 0xe3ced84, + 0xe73ce87, 0xe78a72c, 0xe78ce75, 0xe963e43, 0xe96de3e, 0xec54384, + 0xec9d3e3, 0xecd7e57, 0xecf8d3e, 0xefb84ef, 0xefbef51, 0xefe4f16, + 0xefe4f21, 0xefe84f6, 0xefe8f56, 0xefeb4f1, 0xf5318b8, 0xf8e5248, + 0xf8ecf85, 0xf8ef5b8, 0xf97c92c, 0xf9e39e6, 0xfbefb95, 0xfc239c2, + 0xfe7ce75, 0x124914712, 0x127128e78, 0x128d812ed, 0x129e8912e, + 0x12f8ef812, 0x1315813b8, 0x1319143c9, 0x132b9123b, 0x1361b613f, + 0x1361c613d, 0x1361c6143, 0x1361fc613, 0x136813c68, 0x136cb6136, + 0x1396b6136, 0x1461c6a14, 0x1467c6147, 0x14712e714, 0x1476e7147, + 0x14914c9ca, 0x14e714ce7, 0x168a6ea68, 0x16c6d4164, 0x16c868cec, + 0x176be76b7, 0x179f97bf9, 0x17a7ca787, 0x17d7bd7cb, 0x18131b8cb, + 0x1898be98b, 0x18b8abeab, 0x18ced8ced, 0x18dad8cad, 0x18fb318b3, + 0x18fc78fc8, 0x191319c9d, 0x1913a31ba, 0x191419ae9, 0x1914b34b1, + 0x191d3bd31, 0x1941479c9, 0x194149ec9, 0x196be969e, 0x1983138b8, + 0x1983139c9, 0x19cd419c4, 0x1a7e71417, 0x1c979bc97, 0x1cfd7fdcf, + 0x1ebfe7bfe, 0x1f91319c9, 0x214717c24, 0x214aea24a, 0x217b2172f, + 0x217cb2172, 0x217fc2127, 0x2183c2183, 0x23258d38d, 0x2328d38df, + 
0x2329e389e, 0x2329e39fe, 0x232de8d3e, 0x232e349e3, 0x232e39e3d, + 0x2378c2378, 0x23d8c23d3, 0x242a9f49f, 0x242c2ec84, 0x24329f49f, + 0x248a2ea24, 0x248e5e254, 0x24d427d4f, 0x24d47dc24, 0x254e527e5, + 0x25df85f25, 0x25f52a8f5, 0x26239623d, 0x2623f9623, 0x26249624a, + 0x26286232f, 0x2628d86ed, 0x262962d42, 0x26298e96e, 0x262e7e876, + 0x262f8ef86, 0x26b96b276, 0x278425784, 0x27952795d, 0x279528795, + 0x27a27a87f, 0x27a27a97d, 0x27a27a987, 0x27a27af97, 0x27a2e7a87, + 0x27a2eab7a, 0x2842784f2, 0x286239862, 0x28952a895, 0x289532895, + 0x28b278b98, 0x28b28ba98, 0x28b28db98, 0x28b2db8ed, 0x2952d8952, + 0x296243962, 0x296b23962, 0x2989e5925, 0x2b238b298, 0x2c1287128, + 0x2c1424cec, 0x2c212712d, 0x2c212812a, 0x2c212d812, 0x2c6286cec, + 0x2c7842784, 0x2ca27a287, 0x2d425d7d4, 0x2d4284ded, 0x2d42f49f4, + 0x2e527e587, 0x2e52e7e5d, 0x2e52e7fe5, 0x2e52fe8f5, 0x2ea2462ea, + 0x2ea6826ea, 0x2ef4824ef, 0x2f532f8f5, 0x2f9527952, 0x3168e3183, + 0x318be3183, 0x3218e3183, 0x327349734, 0x32be32b73, 0x32d38d398, + 0x3436cf4cf, 0x3438498c9, 0x3439bf49f, 0x3439f49f6, 0x3439f4c9f, + 0x343cf41cf, 0x343f84cf8, 0x34be34737, 0x35378c537, 0x353dc5373, + 0x35483c548, 0x35c813581, 0x35f536bf5, 0x35f538f56, 0x35f53bf51, + 0x36a3696ad, 0x36a3696ba, 0x36a36986a, 0x36a36f96a, 0x36a3c686a, + 0x36a68f36a, 0x36aca34a3, 0x37367b67f, 0x373687c68, 0x3736c67d6, + 0x37387b8cb, 0x373b67cb6, 0x373b967b6, 0x373fc67c6, 0x3793bd7bd, + 0x389538956, 0x38ba38baf, 0x38d38d986, 0x38d38db98, 0x38d3a8d98, + 0x38d3b8dcb, 0x397349736, 0x39734a973, 0x3979a73ba, 0x39895c935, + 0x39ca389ca, 0x39ca39cad, 0x39ca39fca, 0x3a31ca343, 0x3a348ca34, + 0x3a783ca78, 0x3ac8d3a8d, 0x3b2e328b2, 0x3b4375b43, 0x3b437b43f, + 0x3b459b435, 0x3b8bacb3a, 0x3bc538bc5, 0x3bc53b6c5, 0x3bc53bc51, + 0x3bd31bdcb, 0x3bd73bdcb, 0x3c537bc53, 0x3c537c543, 0x3c537cf53, + 0x3c9cacb3a, 0x3ca349ca3, 0x3ca34ca73, 0x3cbc5c935, 0x3cf53c8f5, + 0x3d36bd3cb, 0x3e213b23b, 0x3e318a313, 0x3e3b2b632, 0x3e73b67b6, + 0x3ea6a386a, 0x3eba8ba3a, 0x3f53fb8f5, 0x3f53fbf95, 0x414914e96, + 0x41714b4f1, 0x4171b41cb, 0x436b4696b, 0x437b437cb, 0x438468c68, + 0x439cb49cb, 0x452e52495, 0x454797c95, 0x4548ae548, 0x4548e5486, + 0x454ac7ac5, 0x454be541b, 0x454c5484a, 0x454c6c576, 0x454e546b4, + 0x454ec7e57, 0x462ce42c4, 0x46ad4cadc, 0x46b4696ba, 0x46b4abeab, + 0x46b4cb676, 0x46b4d696b, 0x46ced4ced, 0x47145b471, 0x4715c4571, + 0x47abc4b7a, 0x47d4797d6, 0x47d479a7d, 0x47d497dc9, 0x47d4a7cad, + 0x48467c687, 0x48478be78, 0x48478e786, 0x484c68c6a, 0x484dc68c6, + 0x484e78ce7, 0x484e78ea7, 0x49cb4d9cb, 0x49ed49ed6, 0x49ed4c9ed, + 0x4a34f49f4, 0x4ad41acad, 0x4ad84acad, 0x4aed49ed4, 0x4b41abeab, + 0x4b84eabea, 0x4bc7d74db, 0x4c2462c76, 0x4c249c2ec, 0x4c548ec54, + 0x4c9cbc4ba, 0x4ced41ced, 0x4ced84ced, 0x4db7d497d, 0x4e54b8e54, + 0x4ece7de4d, 0x51361b613, 0x5181ae51e, 0x5181ec51e, 0x518c515ca, + 0x51d81c51c, 0x5217b2172, 0x521b2e52e, 0x526286232, 0x52a27a287, + 0x52be5295e, 0x537367b67, 0x538ba38ba, 0x53a3595ba, 0x53a6a386a, + 0x562b25e52, 0x5715c571d, 0x5715c5871, 0x5815e5281, 0x5815e5816, + 0x58da2da8d, 0x592593b23, 0x59d3bd359, 0x5b25e58b2, 0x5b7db27db, + 0x5d45c5484, 0x62c868232, 0x67cfc674f, 0x68a6ea685, 0x69b674b69, + 0x6bfbefb3e, 0x6c5e86ce8, 0x6c686ec3e, 0x6cfdfc3df, 0x719417b41, + 0x71f5c1571, 0x7349734c9, 0x7387cf8cf, 0x739f97bf9, 0x73bc97bc9, + 0x76be76b75, 0x787489248, 0x787cf8cf5, 0x787f4cf84, 0x79f97bf95, + 0x7a72c42a4, 0x7a7ca7875, 0x7a7cfca4f, 0x7bd74debd, 0x7bd7bd2bf, + 0x7bd7bd2cb, 0x7bd7bdcb5, 0x7c97bc957, 0x7ce7bce2c, 0x7ce7ced2c, + 0x7ce7cfe2c, 0x7ce7fec4f, 0x7e78ce72c, 0x7f97c94f4, 0x8468c68ec, + 
0x8498be98b, 0x87f8cf82c, 0x898a35935, 0x898c92c32, 0x8ad82adf2, + 0x8ad8a2aed, 0x8ad8a2cad, 0x8adac58ad, 0x8b28efbef, 0x8b2eae8ba, + 0x8b8abeab5, 0x8b8cbec3e, 0x8ced8ced5, 0x8cf8cf2ca, 0x8cf8cfe2c, + 0x8cf8dcf2c, 0x8cf8ecf3e, 0x8d3cf8dcf, 0x8d89c3d89, 0x8e318ce31, + 0x8e98be985, 0x8eced3e8d, 0x8ef86ef3e, 0x8efb8ef3e, 0x8f8aef83e, + 0x95e6b69e6, 0x96b6e963e, 0x98ae9a2ae, 0x98be98b3e, 0x9adf93df3, + 0x9ae93eaba, 0x9ae9a2aed, 0x9ae9fa2ae, 0x9bde39ebd, 0x9bf92bf32, + 0x9bf9b2bfa, 0x9bf9b2dbf, 0x9bf9db3df, 0x9c9bce4bc, 0x9df96d3df, + 0x9dfc9d3df, 0x9e3bfb9fe, 0x9febf92bf, 0xa2eab9ea2, 0xa35f538f5, + 0xb6296be96, 0xbcb9c279c, 0xbefb9f4ef, 0xbefbef3e1, 0xbefbef73e, + 0xbefbef7e5, 0xcdfc7df57, 0xcdfcd73df, 0xcdfcd7d4f, 0xcdfd31cdf, + 0xce348ce34, 0xcfc238fc2, 0xd427d4ed7, 0xea72ae42a, 0xf97bf94fb, + 0x1241914e912, 0x131813f8cf8, 0x1319f913bf9, 0x131c91bc913, + 0x14161c614ec, 0x1461a6ea614, 0x147a71ca714, 0x14b714be714, + 0x2181a2ea218, 0x21c21812ece, 0x232be32b9e3, 0x232d3fd9fd3, + 0x232ef8fe3fe, 0x252be5257e5, 0x258f5278f52, 0x25e52de8de5, + 0x25fdf527df5, 0x262b76e76b6, 0x28b298be98b, 0x2a2da8da298, + 0x2b87eb872b8, 0x31e31b613b6, 0x32739732b23, 0x3437f4cf437, + 0x34b43e349e3, 0x359538953b8, 0x35f537bf537, 0x36a96a3a439, + 0x36c686c53c5, 0x373467c6734, 0x395369b6953, 0x3aba38ba398, + 0x3ba3fb9fba3, 0x3cfca38fca3, 0x3d38d368c68, 0x3d3b6bd396b, + 0x3d3cbd9cbd3, 0x3e326238623, 0x4171f4fcf41, 0x42c249c279c, + 0x42d427d497d, 0x45295795245, 0x48427842e78, 0x48468a6ea68, + 0x4847a78ca78, 0x4c24ec27ec2, 0x4c9acad4adc, 0x4d7d467c67d, + 0x4da6ad496ad, 0x516be51615e, 0x51e51815eb8, 0x526826e525e, + 0x535c538d38d, 0x59e545eb459, 0x5c54d45c7d4, 0x5c58457845c, + 0x67962462967, 0x69eb69e4b69, 0x715f517bf51, 0x71c517bc517, + 0x7a7eba4baea, 0x7d7fdcfd72c, 0x7ecb4ecb7ec, 0x83a318ca318, + 0x842984e9842, 0x8ded8ced82c, 0x8f8eaef82ae, 0x91721b21791, + 0x9379437b437, 0x97f97bf92bf, 0x9c97bc974bc, 0x9fdadf92adf, + 0xa24ea249ea2, 0xa2a79a72bab, 0xbd4ed9ed4bd, 0xd7db2db7ede, + 0xef94f24f9ef, 0xf4bfe7febf4, + }; + + map[22] = { + 0x1a, 0x1232, 0x1676, 0x1bdb, 0x1cec, 0x2362, 0x262a, 0x3273, 0x373a, + 0x484a, 0x595a, 0x6276, 0x7367, 0x121712, 0x124d42, 0x125e52, + 0x131613, 0x134b43, 0x135c53, 0x168d86, 0x169e96, 0x178b87, 0x179c97, + 0x1bfefb, 0x1cfdfc, 0x232484, 0x232595, 0x238b28, 0x239c29, 0x24d462, + 0x25e562, 0x262bdb, 0x262cec, 0x27a27a, 0x28b2a8, 0x29c2a9, 0x328d38, + 0x329e39, 0x34b473, 0x35c573, 0x36a36a, 0x373bdb, 0x373cec, 0x38d3a8, + 0x39e3a9, 0x467684, 0x46b46a, 0x46b476, 0x47d467, 0x47d47a, 0x484cec, + 0x48bdb4, 0x4b6db4, 0x4d7bd4, 0x4f9f4a, 0x567695, 0x56c56a, 0x56c576, + 0x57e567, 0x57e57a, 0x595bdb, 0x59cec5, 0x5c6ec5, 0x5e7ce5, 0x5f8f5a, + 0x628d86, 0x629e96, 0x738b87, 0x739c97, 0x8b2db8, 0x8d3bd8, 0x9c2ec9, + 0x9e3ce9, 0xb4384b, 0xb8478b, 0xc5395c, 0xc9579c, 0xd4284d, 0xd8468d, + 0xe5295e, 0xe9569e, 0x1218d812, 0x1219e912, 0x124efe42, 0x125dfd52, + 0x1318b813, 0x1319c913, 0x134cfc43, 0x135bfb53, 0x1614d416, 0x1615e516, + 0x168efe86, 0x169dfd96, 0x1714b417, 0x1715c517, 0x178cfc87, 0x179bfb97, + 0x1b45e54b, 0x1b89e98b, 0x1c54d45c, 0x1c98d89c, 0x23249f94, 0x23258f85, + 0x238cfc28, 0x239bfb29, 0x24d42959, 0x24d49c29, 0x24efe462, 0x25dfd562, + 0x25e52848, 0x25e58b28, 0x262befeb, 0x262cdfdc, 0x28b82cec, 0x28dad82a, + 0x28fcf82a, 0x29c92bdb, 0x29eae92a, 0x29fbf92a, 0x328efe38, 0x329dfd39, + 0x34b43959, 0x34b49e39, 0x34cfc473, 0x35bfb573, 0x35c53848, 0x35c58d38, + 0x373dcfcd, 0x373ebfbe, 0x38bab83a, 0x38d83ece, 0x38fef83a, 0x39cac93a, + 0x39e93dbd, 0x39fdf93a, 0x42d427d4, 0x43b436b4, 0x46769f94, 0x469e9684, + 
0x46ad46a6, 0x46b49e96, 0x46b6cec4, 0x46cfc6a4, 0x479c9784, 0x47ab47a7, + 0x47d49c97, 0x47d7ece4, 0x47efe7a4, 0x484befbe, 0x484cfdfc, 0x49cb4ec9, + 0x49ed4ce9, 0x49f49cec, 0x49f4bdb9, 0x4abc9cb4, 0x4ade9ed4, 0x4b6efb4e, + 0x4bc9cdb4, 0x4c9f479c, 0x4cfd9f4c, 0x4d7cfd4c, 0x4db9ed49, 0x4e9f469e, + 0x4efb9f4e, 0x4f6cf476, 0x4f7ef467, 0x4fc6ecf4, 0x4fe7cef4, 0x52e527e5, + 0x53c536c5, 0x56768f85, 0x568d8695, 0x56ae56a6, 0x56bfb6a5, 0x56c58d86, + 0x56c6bdb5, 0x578b8795, 0x57ac57a7, 0x57dfd7a5, 0x57e58b87, 0x57e7dbd5, + 0x58bc5db8, 0x58de5bd8, 0x58f58bdb, 0x58f5cec8, 0x595bfefb, 0x595cdfcd, + 0x5acb8bc5, 0x5aed8de5, 0x5b8f578b, 0x5bfe8f5b, 0x5c6dfc5d, 0x5cb8bec5, + 0x5d8f568d, 0x5dfc8f5d, 0x5e7bfe5b, 0x5ec8de58, 0x5f6bf576, 0x5f7df567, + 0x5fb6dbf5, 0x5fd7bdf5, 0x628ef86e, 0x629df96d, 0x738cf87c, 0x739bf97b, + 0x86d863d8, 0x87b872b8, 0x8b2efb8e, 0x8c5f835c, 0x8d3cfd8c, 0x8e5f825e, + 0x8fc2ecf8, 0x8fe3cef8, 0x96e963e9, 0x97c972c9, 0x9b4f934b, 0x9c2dfc9d, + 0x9d4f924d, 0x9e3bfe9b, 0x9fb2dbf9, 0x9fd3bdf9, 0xb45e584b, 0xb849e98b, + 0xbe54b95e, 0xbe98b59e, 0xbf5395fb, 0xbf9579fb, 0xc54d495c, 0xc958d89c, + 0xcd45c84d, 0xcd89c48d, 0xcf4384fc, 0xcf8478fc, 0xdf5295fd, 0xdf9569fd, + 0xef4284fe, 0xef8468fe, 0x1218ef812e, 0x1219df912d, 0x1318cf813c, + 0x1319bf913b, 0x1614ef416e, 0x1615df516d, 0x1714cf417c, 0x1715bf517b, + 0x1b419e914b, 0x1b815e518b, 0x1c518d815c, 0x1c914d419c, 0x2428427842, + 0x2529527952, 0x2a28fea2f8, 0x2a29fda2f9, 0x2b2db27db2, 0x2c2ec27ec2, + 0x3438436843, 0x3539536953, 0x3a38fca3f8, 0x3a39fba3f9, 0x3bd3bd6bd3, + 0x3ce3ce6ce3, 0x42d42e9ed4, 0x43b43c9cb4, 0x49cb4797c9, 0x49ed4696e9, + 0x4a6aef4ea6, 0x4a7acf4ca7, 0x4ab9eab4e9, 0x4ad9cad4c9, 0x4b45e546b4, + 0x4d45c547d4, 0x4fbefb7ef4, 0x4fc6dfcdf4, 0x52e52d8de5, 0x53c53b8bc5, + 0x58bc5787b8, 0x58de5686d8, 0x5a6adf5da6, 0x5a7abf5ba7, 0x5ac8dac5d8, + 0x5ae8bae5b8, 0x5c54d456c5, 0x5e54b457e5, 0x5fb6efbef5, 0x5fcdfc7df5, + 0x8b289e98b8, 0x8d389c98d8, 0x8fbefb3ef8, 0x8fc2dfcdf8, 0x9c298d89c9, + 0x9e398b89e9, 0x9fb2efbef9, 0x9fcdfc3df9, 0xbf49f479fb, 0xbf538f58fb, + 0xcf439f49fc, 0xcf58f578fc, 0xdf49f469fd, 0xdf528f58fd, 0xef429f49fe, + 0xef58f568fe, 0x2428429e9842, 0x2529528d8952, 0x2b29edbed2b9, + 0x2c28decde2c8, 0x3438439c9843, 0x3539538b8953, 0x3d39cbdcb3d9, + 0x3e38bcebc3e8, 0x42f4ef427ef4, 0x43f4cf436cf4, 0x4846845e5486, + 0x4847845c5487, 0x4b7ecb47e7ce, 0x4ced6ced46c6, 0x52f5df527df5, + 0x53f5bf536bf5, 0x5956954d4596, 0x5957954b4597, 0x5bde6bde56b6, + 0x5c7dbc57d7bd, 0x86f8ef863ef8, 0x87f8cf872cf8, 0x96f9df963df9, + 0x97f9bf972bf9, + }; + + map[32] = { + 0x16d, 0x21d, 0x62d, 0x16343, 0x16787, 0x16efe, 0x178a7, 0x187b8, + 0x19c9d, 0x1a7ba, 0x1aba6, 0x1b8ab, 0x21343, 0x21aba, 0x21efe, 0x23473, + 0x24384, 0x2595d, 0x27387, 0x27871, 0x28478, 0x3186a, 0x34362, 0x346a3, + 0x34736, 0x34a31, 0x34a73, 0x373d6, 0x3a31d, 0x3a73d, 0x3ad84, 0x4176b, + 0x436b4, 0x43846, 0x43b41, 0x43b84, 0x484d6, 0x4b41d, 0x4b84d, 0x4bd73, + 0x56c5d, 0x595d6, 0x5c51d, 0x5c95d, 0x62787, 0x62aba, 0x62efe, 0x6a3ba, + 0x6b4ab, 0x73a87, 0x73db8, 0x76b23, 0x78376, 0x78a72, 0x7a27d, 0x84b78, + 0x84da7, 0x86a24, 0x87486, 0x87b82, 0x8b28d, 0x9c2d9, 0xa2417, 0xa31ba, + 0xa73ba, 0xa7d4b, 0xab7a2, 0xb2318, 0xb41ab, 0xb84ab, 0xb8d3a, 0xba8b2, + 0xd17a7, 0xd18b8, 0xd2373, 0xd2484, 0xd36a3, 0xd46b4, + 0x1318163, 0x13439c9, 0x13813b8, 0x1417164, 0x14714a7, 0x1635f53, + 0x1645e54, 0x178ce7c, 0x179f9a7, 0x17d7bd7, 0x187cf8c, 0x189e9b8, + 0x18d8ad8, 0x19ae9ba, 0x19bf9ab, 0x19eae9d, 0x19eafe9, 0x19fbef9, + 0x19fbf9d, 0x1a686a6, 0x1a7aefe, 0x1a7cfca, 0x1acfca6, 0x1b676b6, + 0x1b8bfef, 
0x1b8cecb, 0x1bcecb6, 0x1c9787c, 0x1c9caba, 0x1c9cefe, + 0x1ce7fec, 0x1cf8efc, 0x1d7ece7, 0x1d8fcf8, 0x1e9896e, 0x1e98c9e, + 0x1ec9bce, 0x1f9796f, 0x1f97c9f, 0x1fc9acf, 0x2132b23, 0x2142a24, + 0x21acfca, 0x21bcecb, 0x232b273, 0x2349e39, 0x235f573, 0x23d38d3, + 0x242a284, 0x2439f49, 0x245e584, 0x24d47d4, 0x257e587, 0x258f578, + 0x2595aba, 0x25e7e5d, 0x25e7fe5, 0x25f8ef5, 0x25f8f5d, 0x2714171, + 0x2737efe, 0x2739f97, 0x279f971, 0x2813181, 0x2848fef, 0x2849e98, + 0x289e981, 0x2953439, 0x2959787, 0x2959efe, 0x29e3fe9, 0x29f4ef9, + 0x2d3e9e3, 0x2d4f9f4, 0x2e5451e, 0x2e5495e, 0x2e9589e, 0x2f5351f, + 0x2f5395f, 0x2f9579f, 0x3181a31, 0x31f96af, 0x3435956, 0x343c95c, + 0x343c9c2, 0x346ce3c, 0x34739c9, 0x348a348, 0x349ca39, 0x349e396, + 0x349e3c9, 0x34a3595, 0x34ae9e3, 0x34ce31c, 0x34ce73c, 0x35f5362, + 0x35f56a3, 0x35f5736, 0x35f5a73, 0x362b232, 0x36a3efe, 0x37387b8, + 0x3763fef, 0x39ca3d9, 0x39e3d96, 0x39e3dc9, 0x3a3595d, 0x3a5f531, + 0x3a73fef, 0x3b238b2, 0x3ce3d1c, 0x3ce73dc, 0x3cecd84, 0x3d36bd3, + 0x3d38d36, 0x3d3bd31, 0x3db8d3d, 0x3dbd73d, 0x4171b41, 0x41e96be, + 0x436cf4c, 0x437b437, 0x43849c9, 0x439cb49, 0x439f496, 0x439f4c9, + 0x43b4595, 0x43bf9f4, 0x43cf41c, 0x43cf84c, 0x45e5462, 0x45e56b4, + 0x45e5846, 0x45e5b84, 0x462a242, 0x46b4fef, 0x48478a7, 0x4864efe, + 0x49cb4d9, 0x49f4d96, 0x49f4dc9, 0x4a247a2, 0x4b4595d, 0x4b5e541, + 0x4b84efe, 0x4cf4d1c, 0x4cf84dc, 0x4cfcd73, 0x4d46ad4, 0x4d47d46, + 0x4d4ad41, 0x4da7d4d, 0x4dad84d, 0x56ae5ba, 0x56bf5ab, 0x56c5343, + 0x56c5787, 0x56c5aba, 0x56c5efe, 0x5787956, 0x57ac587, 0x57e576d, + 0x57e5876, 0x57e5a7d, 0x57e5a87, 0x57e5db8, 0x58bc578, 0x58f5786, + 0x58f586d, 0x58f5b78, 0x58f5b8d, 0x58f5da7, 0x59578a7, 0x59587b8, + 0x595a7ad, 0x595a7ba, 0x595aba6, 0x595b8ab, 0x595b8bd, 0x5a7ac5d, + 0x5a7df5b, 0x5a7e5ba, 0x5ab7ac5, 0x5ae51ad, 0x5ae51ba, 0x5ae95ad, + 0x5ae95ba, 0x5b8bc5d, 0x5b8de5a, 0x5b8f5ab, 0x5ba8bc5, 0x5bf51ab, + 0x5bf51bd, 0x5bf95ab, 0x5bf95bd, 0x5c53473, 0x5c54384, 0x5c57387, + 0x5c58478, 0x5c78795, 0x5c7e587, 0x5c8f578, 0x5c95aba, 0x5c95efe, + 0x5ce7e5d, 0x5ce7fe5, 0x5cf8ef5, 0x5cf8f5d, 0x5e6ae5d, 0x5ea7fe5, + 0x5ead8f5, 0x5ef6ae5, 0x5ef7e56, 0x5efae51, 0x5efae95, 0x5f6bf5d, + 0x5fb8ef5, 0x5fbd7e5, 0x5fe6bf5, 0x5fe8f56, 0x5febf51, 0x5febf95, + 0x6279f97, 0x6289e98, 0x62a686a, 0x62b676b, 0x67b674b, 0x68a683a, + 0x6a3cfca, 0x6b4cecb, 0x6ce3ecd, 0x6ce3fec, 0x6cf4efc, 0x6cf4fcd, + 0x6e5c45e, 0x6ecb5ce, 0x6ecbc2e, 0x6f5c35f, 0x6fca5cf, 0x6fcac2f, + 0x7367b67, 0x7379c9d, 0x73879c9, 0x739f976, 0x73a9f97, 0x76fc23f, + 0x783ece7, 0x7875c51, 0x7879c92, 0x78ce7c2, 0x79f9a72, 0x7a27fef, + 0x7a7ba4b, 0x7c537dc, 0x7ce7dc2, 0x7d4bd7d, 0x7d7bd72, 0x8468a68, + 0x84789c9, 0x8489c9d, 0x849e986, 0x84b9e98, 0x86ec24e, 0x874fcf8, + 0x87cf8c2, 0x89e9b82, 0x8b28efe, 0x8b8ab3a, 0x8c548dc, 0x8cf8dc2, + 0x8d3ad8d, 0x8d8ad82, 0x93adf49, 0x94bde39, 0x9a359ba, 0x9abac92, + 0x9ae93ad, 0x9ae93ba, 0x9ae9a2d, 0x9ae9ba2, 0x9ae9d4b, 0x9b459ab, + 0x9bf94ab, 0x9bf94bd, 0x9bf9ab2, 0x9bf9b2d, 0x9bf9d3a, 0x9c9ab3a, + 0x9c9ba4b, 0x9e3afe9, 0x9e3dbf9, 0x9ef3e96, 0x9ef3ec9, 0x9efae92, + 0x9f4bef9, 0x9f4dae9, 0x9fe4f96, 0x9fe4fc9, 0x9febf92, 0xa2f517f, + 0xa31afef, 0xa31cfca, 0xa73cfca, 0xa7cfca2, 0xabac5c1, 0xb2e518e, + 0xb41befe, 0xb41cecb, 0xb84cecb, 0xb8cecb2, 0xc3435c1, 0xc73df8c, + 0xc84de7c, 0xce73fec, 0xce7d4fc, 0xcef3ec1, 0xcef7ec2, 0xcf84efc, + 0xcf8d3ec, 0xcfe4fc1, 0xcfe8fc2, 0xe5186ce, 0xe5495e6, 0xe54c95e, + 0xe5c45e1, 0xe9589e6, 0xe95c89e, 0xe96b25e, 0xe98c9e2, 0xec2419e, + 0xec51bce, 0xec95bce, 0xec9bce2, 0xefe5956, 0xefe9c92, 0xefec5c1, + 0xf5176cf, 0xf5395f6, 
0xf53c95f, 0xf5c35f1, 0xf9579f6, 0xf95c79f, + 0xf96a25f, 0xf97c9f2, 0xfc2319f, 0xfc51acf, 0xfc95acf, 0xfc9acf2, + 0x13129f913, 0x13161b613, 0x131813c9c, 0x135c9bc53, 0x13813fcf8, + 0x13f913c9f, 0x14129e914, 0x14161a614, 0x141714c9c, 0x145c9ac54, + 0x14714ece7, 0x14e914c9e, 0x16715f517, 0x16815e518, 0x1715f51a7, + 0x17d7fcfd7, 0x1815e51b8, 0x18d8eced8, 0x197f97bf9, 0x198e98ae9, + 0x19edbede9, 0x19fdafdf9, 0x1a6a9f96a, 0x1b6b9e96b, 0x1ce7bcebc, + 0x1cf8acfac, 0x1e512815e, 0x1e914196e, 0x1ec6c86ce, 0x1f512715f, + 0x1f913196f, 0x1fc6c76cf, 0x2132cfc23, 0x2142cec24, 0x232b29e39, + 0x232cfc273, 0x23d3f9fd3, 0x242a29f49, 0x242cec284, 0x24d4e9ed4, + 0x253f538f5, 0x254e547e5, 0x25ed8ede5, 0x25fd7fdf5, 0x2712b2171, + 0x2812a2181, 0x29532b239, 0x29542a249, 0x29e389e89, 0x29f479f79, + 0x2a25f562a, 0x2ac9589ca, 0x2af52a95f, 0x2b25e562b, 0x2bc9579cb, + 0x2be52b95e, 0x2e52b251e, 0x2ec2642ce, 0x2f52a251f, 0x2fc2632cf, + 0x3181ce31c, 0x326286232, 0x34ece3484, 0x3589ca895, 0x359c89532, + 0x37387cf8c, 0x373f97c9f, 0x381835c51, 0x38dfcf83d, 0x39c2b2329, + 0x39efb3ef9, 0x3a19f9131, 0x3a343f9f4, 0x3a953f539, 0x3b23f9fb2, + 0x3bc5395c8, 0x3bdbe9e3d, 0x3c537f53c, 0x3cecd38d3, 0x3cf538f5c, + 0x3d36cfd3c, 0x3d39fd396, 0x3d39fd3c9, 0x3d3bd3595, 0x3d3fcfd31, + 0x3d9cb3d39, 0x3dfcfd73d, 0x3fc239c2f, 0x4171cf41c, 0x426276242, + 0x43fcf4373, 0x4579cb795, 0x459c79542, 0x471745c51, 0x47dece74d, + 0x48478ce7c, 0x484e98c9e, 0x49c2a2429, 0x49fea4fe9, 0x4a24e9ea2, + 0x4ac5495c7, 0x4adaf9f4d, 0x4b19e9141, 0x4b434e9e3, 0x4b954e549, + 0x4c548e54c, 0x4ce547e5c, 0x4cfcd47d4, 0x4d46ced4c, 0x4d49ed496, + 0x4d49ed4c9, 0x4d4ad4595, 0x4d4eced41, 0x4d9ca4d49, 0x4deced84d, + 0x4ec249c2e, 0x53f536bf5, 0x53f538f56, 0x53f53b8f5, 0x53f53bf51, + 0x54e546ae5, 0x54e547e56, 0x54e54a7e5, 0x54e54ae51, 0x5676b6c57, + 0x5686a6c58, 0x56ae5686a, 0x56bf5676b, 0x576b67956, 0x579c9bc53, + 0x579f795a7, 0x57abf57ab, 0x57dbc57d7, 0x57e567b67, 0x57e587b8b, + 0x57edb7d75, 0x586a68956, 0x589c9ac54, 0x589e895b8, 0x58bae58ba, + 0x58dac58d8, 0x58f568a68, 0x58f578a7a, 0x58fda8d85, 0x595ada8da, + 0x595bdb7db, 0x5acfc7ac5, 0x5ada8dea5, 0x5adf51ada, 0x5adf95ada, + 0x5aeafe8f5, 0x5bcec8bc5, 0x5bdb7dfb5, 0x5bde51bdb, 0x5bde95bdb, + 0x5bfbef7e5, 0x5c53d38d3, 0x5c54d47d4, 0x5cafca8f5, 0x5cbecb7e5, + 0x5ded8de56, 0x5dedb8de5, 0x5dfd7df56, 0x5dfda7df5, 0x5e5945ae9, + 0x5e9589ae9, 0x5ed6bde5d, 0x5f5935bf9, 0x5f9579bf9, 0x5fd6adf5d, + 0x62a69f96a, 0x62b69e96b, 0x67954c597, 0x67fc675cf, 0x68953c598, + 0x68ec685ce, 0x6a369f96a, 0x6b469e96b, 0x6cafca4fc, 0x6cbecb3ec, + 0x6ec686c2e, 0x6fc676c2f, 0x736cfc676, 0x79c5bc971, 0x7cef47efc, + 0x7d74d79c9, 0x7d75cfd75, 0x7d7cfd7c2, 0x7f517c51f, 0x846cec686, + 0x89c5ac981, 0x8cfe38fec, 0x8d83d89c9, 0x8d85ced85, 0x8d8ced8c2, + 0x8e518c51e, 0x97f974bf9, 0x97f974f96, 0x97f97bf92, 0x98e983ae9, + 0x98e983e96, 0x98e98ae92, 0x9acfac93a, 0x9ae9ba4b4, 0x9aed4ada9, + 0x9bcebc94b, 0x9bf9ab3a3, 0x9bfd3bdb9, 0x9ded4bde9, 0x9dedbde92, + 0x9dfd3adf9, 0x9dfdadf92, 0x9e398c9ec, 0x9e3bcebc9, 0x9f497c9fc, + 0x9f4acfac9, 0xa725f52a2, 0xac5945ca6, 0xaf96a596f, 0xb825e52b2, + 0xbc5935cb6, 0xbe96b596e, 0xcafca4fc1, 0xcafca84fc, 0xcafca8fc2, + 0xcbecb3ec1, 0xcbecb73ec, 0xcbecb7ec2, 0x13161cfc613, + 0x1391f913bf9, 0x14161cec614, 0x1491e914ae9, 0x16a615f516a, + 0x16b615e516b, 0x17e7fe7bfe7, 0x18f8ef8aef8, 0x21712cfc217, + 0x21812cec218, 0x21a21f912af, 0x21b21e912be, 0x23e3fe38fe3, + 0x24f4ef47ef4, 0x25e52b257e5, 0x25f52a258f5, 0x32629f96232, + 0x35295895325, 0x353c51bc535, 0x3598a359535, 0x35bc5358bc5, + 0x395cb35c593, 0x3a353f538f5, 0x3c2fc238fc2, 0x3c5953895c3, 
+ 0x3e3fe3bfe31, 0x3e3fe6bfe3e, 0x3e3febfe73e, 0x42629e96242, + 0x45295795425, 0x454c51ac545, 0x4597b459545, 0x45ac5457ac5, + 0x495ca45c594, 0x4b454e547e5, 0x4c2ec247ec2, 0x4c5954795c4, + 0x4f4ef4aef41, 0x4f4ef6aef4f, 0x4f4efaef84f, 0x535c537bc53, + 0x539589535b8, 0x53f53bf5373, 0x545c548ac54, 0x549579545a7, + 0x54e54ae5484, 0x5aca9c89ca5, 0x5bcb9c79cb5, 0x5e51815ae51, + 0x5f51715bf51, 0x62762f5267f, 0x62862e5268e, 0x67c6fc674fc, + 0x68c6ec683ec, 0x73797f97bf9, 0x73cbc97c9bc, 0x791c9bc9719, + 0x79759645979, 0x7e7fe4fea7e, 0x7e7fe74fe76, 0x84898e98ae9, + 0x84cac98c9ac, 0x891c9ac9819, 0x89859635989, 0x8f8ef3efb8f, + 0x8f8ef83ef86, 0x97c9bc9794b, 0x97f974f97a7, 0x98c9ac9893a, + 0x98e983e98b8, 0x9e396b69e96, 0x9f496a69f96, 0xa7acafca4fc, + 0xac65c45ca6c, 0xaca9c289cac, 0xaeafe8fe3ae, 0xaeafea8fea2, + 0xb8bcbecb3ec, 0xbc65c35cb6c, 0xbcb9c279cbc, 0xbfbef7ef4bf, + 0xbfbefb7efb2, 0xcafca8fca3a, 0xcbecb7ecb4b, + }; + + map[33] = { + 0x16df, 0x16ed, 0x16fe, 0x21df, 0x21ed, 0x21fe, 0x62df, 0x62ed, 0x62fe, + 0x1343f6, 0x1454e6, 0x1535d6, 0x163543, 0x163f53, 0x164e34, 0x165d45, + 0x16787f, 0x16797d, 0x167987, 0x16898e, 0x16abaf, 0x16acad, 0x16bcbe, + 0x16dbcb, 0x16eaba, 0x16fcac, 0x178fa7, 0x1796ba, 0x1798a7, 0x179a7d, + 0x17a7df, 0x17a7ed, 0x17af97, 0x17dbf9, 0x1876cb, 0x1879b8, 0x187b8f, + 0x189eb8, 0x18b8df, 0x18b8fe, 0x18be78, 0x18fce7, 0x197dc9, 0x1986ac, + 0x1987c9, 0x198c9e, 0x19c9ed, 0x19c9fe, 0x19cd89, 0x19ead8, 0x1a7afe, + 0x1a7baf, 0x1a7cad, 0x1a7cba, 0x1a7fca, 0x1ac687, 0x1acba6, 0x1ad8fc, + 0x1b8abf, 0x1b8acb, 0x1b8bed, 0x1b8cbe, 0x1b8eab, 0x1ba698, 0x1bf9ea, + 0x1c9acd, 0x1c9bac, 0x1c9bce, 0x1c9cdf, 0x1c9dbc, 0x1cb679, 0x1ce7db, + 0x1d8986, 0x1d89b8, 0x1db8cb, 0x1e7876, 0x1e78a7, 0x1ea7ba, 0x1f9796, + 0x1f97c9, 0x1fc9ac, 0x21343f, 0x21353d, 0x213543, 0x21454e, 0x21787f, + 0x21797d, 0x21898e, 0x21abaf, 0x21acba, 0x21afca, 0x21bcbe, 0x21beab, + 0x21cacd, 0x21cdbc, 0x21d898, 0x21e787, 0x21f979, 0x234f73, 0x235187, + 0x235473, 0x23573d, 0x2373df, 0x2373ed, 0x237f53, 0x23d8f5, 0x243198, + 0x243584, 0x24384f, 0x245e84, 0x2484df, 0x2484fe, 0x248e34, 0x24f9e3, + 0x253d95, 0x254179, 0x254395, 0x25495e, 0x2595ed, 0x2595fe, 0x259d45, + 0x25e7d4, 0x2737fe, 0x27387f, 0x27397d, 0x273987, 0x273f97, 0x279143, + 0x279871, 0x27d4f9, 0x28478f, 0x284798, 0x2848ed, 0x28498e, 0x284e78, + 0x287154, 0x28f5e7, 0x29579d, 0x295879, 0x29589e, 0x2959df, 0x295d89, + 0x298135, 0x29e3d8, 0x2d4541, 0x2d4584, 0x2d8498, 0x2e3431, 0x2e3473, + 0x2e7387, 0x2f5351, 0x2f5395, 0x2f9579, 0x3186af, 0x3196ad, 0x3196ba, + 0x3196db, 0x31986a, 0x319ad8, 0x319b8a, 0x31dbf9, 0x31f96a, 0x34f362, + 0x34f6a3, 0x34f736, 0x34fa31, 0x34fa73, 0x3516ba, 0x35186a, 0x351876, + 0x3518a7, 0x351db8, 0x3521ba, 0x354362, 0x3546a3, 0x354a31, 0x354a73, + 0x3562ba, 0x356a3d, 0x3573d6, 0x35a31d, 0x35a73d, 0x35ad84, 0x36a3df, + 0x36a3ed, 0x36a3fe, 0x3763fe, 0x376543, 0x376f53, 0x3956ba, 0x39586a, + 0x395ba8, 0x3a31df, 0x3a73ed, 0x3a73fe, 0x3a7f53, 0x3ad84f, 0x3ad8f5, + 0x3adf95, 0x3aed95, 0x3af531, 0x3afe95, 0x3ba8f5, 0x3baf95, 0x3d6bf5, + 0x3d86f5, 0x3db8f5, 0x3db985, 0x3dbf51, 0x3dbf95, 0x3f5362, 0x4176bf, + 0x4176cb, 0x4176fc, 0x41796b, 0x417bf9, 0x417c9b, 0x4196be, 0x41e76b, + 0x41fce7, 0x4316cb, 0x43196b, 0x431986, 0x4319b8, 0x431fc9, 0x4321cb, + 0x4356b4, 0x435b41, 0x435b84, 0x4362cb, 0x436b4f, 0x4384f6, 0x43b41f, + 0x43b84f, 0x43bf95, 0x45e462, 0x45e6b4, 0x45e846, 0x45eb41, 0x45eb84, + 0x46b4df, 0x46b4ed, 0x46b4fe, 0x4736cb, 0x47396b, 0x473cb9, 0x486354, + 0x4864ed, 0x486e34, 0x4b41fe, 0x4b84df, 0x4b84ed, 0x4b8e34, 0x4bdf73, + 0x4be341, 0x4bed73, 
0x4bf95e, 0x4bf9e3, 0x4bfe73, 0x4cb9e3, 0x4cbe73, + 0x4e3462, 0x4f6ce3, 0x4f96e3, 0x4fc793, 0x4fc9e3, 0x4fce31, 0x4fce73, + 0x5176cd, 0x5186ac, 0x5186ce, 0x5186ea, 0x51876c, 0x518a7c, 0x518ce7, + 0x51d86c, 0x51ead8, 0x53d562, 0x53d6c5, 0x53d956, 0x53dc51, 0x53dc95, + 0x5416ac, 0x54176c, 0x541796, 0x5417c9, 0x541ea7, 0x5421ac, 0x5436c5, + 0x543c51, 0x543c95, 0x5462ac, 0x546c5e, 0x5495e6, 0x54c51e, 0x54c95e, + 0x54ce73, 0x56c5df, 0x56c5ed, 0x56c5fe, 0x5846ac, 0x58476c, 0x584ac7, + 0x596435, 0x5965df, 0x596d45, 0x5ac7d4, 0x5acd84, 0x5c51ed, 0x5c95df, + 0x5c95fe, 0x5c9d45, 0x5cd451, 0x5cdf84, 0x5ce73d, 0x5ce7d4, 0x5ced84, + 0x5cfe84, 0x5d4562, 0x5e6ad4, 0x5e76d4, 0x5ea7d4, 0x5ea874, 0x5ead41, + 0x5ead84, 0x62787f, 0x627987, 0x627f97, 0x62898e, 0x628e78, 0x62979d, + 0x629d89, 0x62abaf, 0x62acad, 0x62acba, 0x62bcbe, 0x63af53, 0x64be34, + 0x65cd45, 0x6a3baf, 0x6a3cad, 0x6a3cba, 0x6a3fca, 0x6ac243, 0x6ad4fc, + 0x6b4abf, 0x6b4acb, 0x6b4cbe, 0x6b4eab, 0x6ba254, 0x6bf5ea, 0x6c5acd, + 0x6c5bac, 0x6c5bce, 0x6c5dbc, 0x6cb235, 0x6ce3db, 0x6d4b54, 0x6dbc4b, + 0x6dbcb2, 0x6e3a43, 0x6eab3a, 0x6eaba2, 0x6f5c35, 0x6fca5c, 0x6fcac2, + 0x7376df, 0x73876f, 0x7396ba, 0x7397d6, 0x739876, 0x73a97d, 0x73a987, + 0x73af97, 0x73d6cb, 0x73db8f, 0x73db98, 0x73dbf9, 0x73dfc9, 0x73edc9, + 0x73f976, 0x73fec9, 0x743bf9, 0x76b23f, 0x76c23d, 0x76c243, 0x76c2d4, + 0x76cb23, 0x76d4fc, 0x76fc23, 0x78f3a7, 0x78fa72, 0x791643, 0x796243, + 0x796b23, 0x796ba2, 0x796d4b, 0x79a7d2, 0x7a27fe, 0x7a2987, 0x7a2f97, + 0x7c9243, 0x7c9b23, 0x7d4bf9, 0x7d4cb9, 0x7d4f96, 0x7d4fc9, 0x7db2f9, + 0x8476cb, 0x8478f6, 0x847986, 0x8486fe, 0x84986e, 0x84b78f, 0x84b798, + 0x84be78, 0x84dfa7, 0x84e786, 0x84eda7, 0x84f6ac, 0x84fc79, 0x84fc9e, + 0x84fce7, 0x84fea7, 0x854ce7, 0x86a24f, 0x86a254, 0x86a2f5, 0x86ac24, + 0x86c24e, 0x86ea24, 0x86f5ea, 0x871654, 0x876254, 0x876c24, 0x876cb2, + 0x876f5c, 0x87b8f2, 0x89e4b8, 0x89eb82, 0x8a7254, 0x8a7c24, 0x8b2798, + 0x8b28ed, 0x8b2e78, 0x8f5ac7, 0x8f5ce7, 0x8f5e76, 0x8f5ea7, 0x8fc2e7, + 0x935ad8, 0x95796d, 0x9586ac, 0x958796, 0x9589e6, 0x9596ed, 0x95c879, + 0x95c89e, 0x95cd89, 0x95d896, 0x95dfb8, 0x95e6ba, 0x95ea7d, 0x95ea87, + 0x95ead8, 0x95edb8, 0x95feb8, 0x96a25d, 0x96b235, 0x96b25e, 0x96b2e3, + 0x96ba25, 0x96db25, 0x96e3db, 0x97d5c9, 0x97dc92, 0x981635, 0x986235, + 0x986a25, 0x986ac2, 0x986e3a, 0x98c9e2, 0x9b8235, 0x9b8a25, 0x9c2879, + 0x9c29df, 0x9c2d89, 0x9e3ad8, 0x9e3ba8, 0x9e3d86, 0x9e3db8, 0x9ea2d8, + 0xa2417f, 0xa2517d, 0xa25187, 0xa251d8, 0xa25417, 0xa257d4, 0xa2d8f5, + 0xa2f517, 0xa31afe, 0xa31baf, 0xa31cad, 0xa31cba, 0xa31fca, 0xa5c417, + 0xa73cad, 0xa73cba, 0xa73fca, 0xa7a2df, 0xa7ba2f, 0xa7c243, 0xa7cad2, + 0xa7cba2, 0xa7d4bf, 0xa7d4cb, 0xa7d4fc, 0xa7df5c, 0xa7ed5c, 0xa7fca2, + 0xa7fe5c, 0xa874fc, 0xabf73a, 0xac2187, 0xac2417, 0xac2431, 0xac2d84, + 0xac6287, 0xad41fc, 0xad84fc, 0xad8f5c, 0xad8fc2, 0xb2318f, 0xb23198, + 0xb231f9, 0xb23518, 0xb238f5, 0xb2518e, 0xb2e318, 0xb2f9e3, 0xb3a518, + 0xb41abf, 0xb41acb, 0xb41bed, 0xb41cbe, 0xb41eab, 0xb84abf, 0xb84acb, + 0xb84eab, 0xb8a254, 0xb8abf2, 0xb8acb2, 0xb8b2fe, 0xb8cb2e, 0xb8df3a, + 0xb8eab2, 0xb8ed3a, 0xb8f5ac, 0xb8f5ce, 0xb8f5ea, 0xb8fe3a, 0xb985ea, + 0xba2198, 0xba2518, 0xba2541, 0xba2f95, 0xba6298, 0xbce84b, 0xbf51ea, + 0xbf95ea, 0xbf9e3a, 0xbf9ea2, 0xc2319d, 0xc24179, 0xc2419e, 0xc241e7, + 0xc24319, 0xc249e3, 0xc2d419, 0xc2e7d4, 0xc4b319, 0xc51acd, 0xc51bac, + 0xc51bce, 0xc51cdf, 0xc51dbc, 0xc793db, 0xc95bac, 0xc95bce, 0xc95dbc, + 0xc9ac2d, 0xc9b235, 0xc9bac2, 0xc9bce2, 0xc9c2ed, 0xc9dbc2, 0xc9df4b, + 0xc9e3ad, 0xc9e3ba, 0xc9e3db, 0xc9ed4b, 
0xc9fe4b, 0xcad95c, 0xcb2179, + 0xcb2319, 0xcb2351, 0xcb2e73, 0xcb6279, 0xce31db, 0xce73db, 0xce7d4b, + 0xce7db2, 0xd4196b, 0xd45846, 0xd45b41, 0xd45b84, 0xd4b4f1, 0xd4bf95, + 0xd848f6, 0xd84986, 0xd84b98, 0xd84fc9, 0xd86c24, 0xd89b82, 0xdb2518, + 0xdb41cb, 0xdb84cb, 0xdb8bf2, 0xdb8cb2, 0xdb8f5c, 0xdf3a73, 0xe3186a, + 0xe34736, 0xe34a31, 0xe34a73, 0xe3a3d1, 0xe3ad84, 0xe737d6, 0xe73876, + 0xe73a87, 0xe73db8, 0xe76b23, 0xe78a72, 0xea2417, 0xea31ba, 0xea73ba, + 0xea7ad2, 0xea7ba2, 0xea7d4b, 0xed5c95, 0xf5176c, 0xf53956, 0xf53c51, + 0xf53c95, 0xf5c5e1, 0xf5ce73, 0xf95796, 0xf959e6, 0xf95c79, 0xf95ea7, + 0xf96a25, 0xf97c92, 0xfc2319, 0xfc51ac, 0xfc95ac, 0xfc9ac2, 0xfc9ce2, + 0xfc9e3a, 0xfe4b84, 0x1312813f, 0x13129813, 0x1312f913, 0x13191436, + 0x13196b23, 0x13198163, 0x131f9163, 0x1321fc23, 0x132c6813, 0x135813b8, + 0x1369193d, 0x138136cb, 0x138139b8, 0x13913dc9, 0x139143c9, 0x13cb6139, + 0x13f913c9, 0x14127914, 0x1412914e, 0x1412e714, 0x14171546, 0x14176c24, + 0x14179164, 0x141e7164, 0x1421ea24, 0x142a6914, 0x1467174f, 0x14714fa7, + 0x147154a7, 0x149146ac, 0x149147c9, 0x14ac6147, 0x14e714a7, 0x1512715d, + 0x15128715, 0x1512d815, 0x15181356, 0x15186a25, 0x15187165, 0x151d8165, + 0x1521db25, 0x152b6715, 0x1568185e, 0x157156ba, 0x157158a7, 0x15815eb8, + 0x15ba6158, 0x15d815b8, 0x1671f517, 0x1676fc67, 0x1681e318, 0x1686ea68, + 0x1691d419, 0x1696db69, 0x16a986a9, 0x16ac86a8, 0x16af96a9, 0x16b796b7, + 0x16ba96b9, 0x16be76b7, 0x16c876c8, 0x16cb76c7, 0x16cd86c8, 0x17151a7d, + 0x176bf676, 0x176c67d6, 0x17941a74, 0x1797a7ba, 0x179bd7bd, 0x17af5175, + 0x17ce7bce, 0x17cecf7e, 0x17e78ce7, 0x18131b8f, 0x186a68f6, 0x186ce686, + 0x18751b85, 0x1878b8cb, 0x187cf8cf, 0x18ad8cad, 0x18adae8d, 0x18be3183, + 0x18d89ad8, 0x19141c9e, 0x196ad696, 0x196b69e6, 0x19831c93, 0x1989c9ac, + 0x198ae9ae, 0x19bf9abf, 0x19bfbd9f, 0x19cd4194, 0x19f97bf9, 0x1a78ca78, + 0x1ab89ab8, 0x1ae9aeba, 0x1afe9aea, 0x1bc97bc9, 0x1bd7bdcb, 0x1bed7bdb, + 0x1cdf8cfc, 0x1cf8cfac, 0x1d412914, 0x1d421c24, 0x1d7dbd7f, 0x1d7ecde7, + 0x1d8dad8f, 0x1dae9dea, 0x1e312813, 0x1e321b23, 0x1e9fbef9, 0x1ecf8efc, + 0x1f512715, 0x1f521a25, 0x2132b23f, 0x2132cb23, 0x2135b23b, 0x2142ac24, + 0x2142c24e, 0x2143c24c, 0x2152a25d, 0x2152ba25, 0x2154a25a, 0x21754175, + 0x21835183, 0x21943194, 0x231913d1, 0x232bf273, 0x232c2187, 0x232c273d, + 0x23537387, 0x2358d38d, 0x235b273b, 0x237b23cb, 0x237c243c, 0x237fc23c, + 0x239e389e, 0x239e9f3e, 0x23e349e3, 0x241714f1, 0x242a2198, 0x242a284f, + 0x242ce284, 0x24348498, 0x2439f49f, 0x243c284c, 0x247d497d, 0x247d7e4d, + 0x248a254a, 0x248ac24a, 0x248ea24a, 0x24d457d4, 0x251815e1, 0x252ad295, + 0x252b2179, 0x252b295e, 0x25459579, 0x2547e57e, 0x254a295a, 0x258f578f, + 0x258f8d5f, 0x259a25ba, 0x259b235b, 0x259db25b, 0x25f538f5, 0x27349734, + 0x27845784, 0x27912b23, 0x27e57e87, 0x27fe57e7, 0x28712c24, 0x28953895, + 0x28d38d98, 0x28ed38d8, 0x29812a25, 0x29df49f9, 0x29f49f79, 0x2a24f62a, + 0x2a25462a, 0x2a26f52a, 0x2a62f96a, 0x2af52a95, 0x2b23562b, 0x2b25e62b, + 0x2b26e32b, 0x2b62e76b, 0x2be32b73, 0x2c23d62c, 0x2c24362c, 0x2c26d42c, + 0x2c62d86c, 0x2cd42c84, 0x2d3d8d3f, 0x2d3e9de3, 0x2d4d7d4f, 0x2d7e5de7, + 0x2db2562b, 0x2db6296b, 0x2e5f8ef5, 0x2e9f4ef9, 0x2ea2462a, 0x2ea6286a, + 0x2fc2362c, 0x2fc6276c, 0x3138163f, 0x314a3914, 0x319121ba, 0x319a313d, + 0x321c232d, 0x3436b4cb, 0x34384986, 0x34384b98, 0x343b41cb, 0x343b84cb, + 0x3489a348, 0x34a3484f, 0x3516b676, 0x3518a313, 0x35373876, 0x35373a87, + 0x353a31ba, 0x353a73ba, 0x3548a348, 0x354a3595, 0x3562686a, 0x356a3bab, + 0x356bd3bd, 0x357387b8, 0x358ad38d, 0x358d38d6, 0x358d3b8d, 
0x359a359d, + 0x35b238b2, 0x35bd73bd, 0x35d3bd31, 0x362bf232, 0x362cb232, 0x36bdb3ed, + 0x36ce3bce, 0x373879b8, 0x37387b8f, 0x37397dc9, 0x373987c9, 0x373e87b8, + 0x373f97c9, 0x3818fa31, 0x38d38df6, 0x38d3b8df, 0x38d3e8d6, 0x38d3eb8d, + 0x398623c2, 0x39e389e6, 0x39e38c9e, 0x39e39ed6, 0x39e39fe6, 0x39e3c9ed, + 0x39e3c9fe, 0x39e3feb8, 0x3a13f913, 0x3a348e34, 0x3a813981, 0x3a9e389e, + 0x3ad38d3f, 0x3ad9f49f, 0x3af53959, 0x3af9e39e, 0x3afe8f58, 0x3b236298, + 0x3b238b2f, 0x3b238cb2, 0x3b8fe3ce, 0x3bd31bdf, 0x3bd31ebd, 0x3bd73bdf, + 0x3bd73ebd, 0x3bdbed95, 0x3c23d9c2, 0x3c2439c2, 0x3c9e3bce, 0x3cb239c2, + 0x3ce31bce, 0x3ce31ced, 0x3ce31cfe, 0x3ce73bce, 0x3ce73ced, 0x3ce73cfe, + 0x3d36bdf3, 0x3dbf9e3e, 0x3e346ce3, 0x3e349e36, 0x3e34ce31, 0x3e34ce73, + 0x3e73ce87, 0x3ecf8d3e, 0x3ed6ce3e, 0x3fc239c2, 0x4149164e, 0x415b4715, + 0x417121cb, 0x417b414f, 0x421a242f, 0x4316c686, 0x4319b414, 0x4359b459, + 0x435b4373, 0x4362696b, 0x436cf4cf, 0x437b437f, 0x438498c9, 0x439bf49f, + 0x439f49f6, 0x439f4c9f, 0x43cf84cf, 0x43f4cf41, 0x4546c5ac, 0x45495796, + 0x45495c79, 0x454c51ac, 0x454c95ac, 0x4597b459, 0x45b4595e, 0x462ac242, + 0x462ce242, 0x46ad4cad, 0x46cfc4df, 0x479624a2, 0x47d47df6, 0x47d47ed6, + 0x47d497d6, 0x47d49a7d, 0x47d4a7df, 0x47d4a7ed, 0x47d4edc9, 0x48478fa7, + 0x484798a7, 0x484987c9, 0x48498c9e, 0x484d98c9, 0x484e78a7, 0x4919eb41, + 0x49f49fe6, 0x49f4c9fe, 0x49f4d9f6, 0x49f4dc9f, 0x4a24f7a2, 0x4a2547a2, + 0x4a7d4cad, 0x4ac247a2, 0x4ad41adf, 0x4ad41aed, 0x4ad41cad, 0x4ad84adf, + 0x4ad84aed, 0x4ad84cad, 0x4b14e714, 0x4b459d45, 0x4b7d497d, 0x4b914791, + 0x4be34737, 0x4be7d47d, 0x4bed9e39, 0x4bf49f4e, 0x4bf7e57e, 0x4c246279, + 0x4c249ac2, 0x4c249c2e, 0x4c9ed4ad, 0x4cf41cfe, 0x4cf41dcf, 0x4cf84cfe, + 0x4cf84dcf, 0x4cfcdf73, 0x4d456ad4, 0x4d457d46, 0x4d45ad41, 0x4d45ad84, + 0x4d84ad98, 0x4dae9f4d, 0x4df6ad4d, 0x4ea247a2, 0x4f46cfe4, 0x4fce7d4d, + 0x513c5813, 0x5157165d, 0x518121ac, 0x518c515e, 0x521b252e, 0x5378c537, + 0x53c5373d, 0x5416a696, 0x5417c515, 0x5437c537, 0x543c5484, 0x5462676c, + 0x546ae5ae, 0x547ce57e, 0x547e57e6, 0x547e5a7e, 0x548c548e, 0x549579a7, + 0x54ae95ae, 0x54e5ae51, 0x562ad252, 0x562ba252, 0x56aea5fe, 0x56bf5abf, + 0x5717dc51, 0x57e57ed6, 0x57e5a7ed, 0x57e5f7e6, 0x57e5fa7e, 0x587625b2, + 0x58f578f6, 0x58f57b8f, 0x58f58df6, 0x58f58fe6, 0x58f5b8df, 0x58f5b8fe, + 0x58f5dfa7, 0x595798a7, 0x59579a7d, 0x595879b8, 0x59589eb8, 0x595d89b8, + 0x595f79a7, 0x5a256287, 0x5a257a2d, 0x5a257ba2, 0x5a7df5bf, 0x5ae51aed, + 0x5ae51fae, 0x5ae95aed, 0x5ae95fae, 0x5aeafe84, 0x5b25e8b2, 0x5b8f5abf, + 0x5ba258b2, 0x5bf51abf, 0x5bf51bdf, 0x5bf51bfe, 0x5bf95abf, 0x5bf95bdf, + 0x5bf95bfe, 0x5c15d815, 0x5c537f53, 0x5c715871, 0x5c8f578f, 0x5cd45848, + 0x5cd8f58f, 0x5cdf7d47, 0x5ce57e5d, 0x5ce8d38d, 0x5db258b2, 0x5e56aed5, + 0x5ead8f5f, 0x5f536bf5, 0x5f538f56, 0x5f53bf51, 0x5f53bf95, 0x5f95bf79, + 0x5fbd7e5f, 0x5fe6bf5f, 0x62a686af, 0x62a6986a, 0x62ac86a8, 0x62b6796b, + 0x62b696be, 0x62ba96b9, 0x62c676cd, 0x62c6876c, 0x62cb76c7, 0x63fec3e3, + 0x64eda4d4, 0x65dfb5f5, 0x679b674b, 0x67b67254, 0x67b67c4b, 0x67c67d5c, + 0x67c6875c, 0x67fc675c, 0x68a68f3a, 0x68a6983a, 0x68c68235, 0x68c68a5c, + 0x68ea683a, 0x69a69243, 0x69a69b3a, 0x69b69e4b, 0x69db694b, 0x6a34ca34, + 0x6a3696ad, 0x6a3c86a8, 0x6a3f96a9, 0x6aeab5ea, 0x6b45ab45, 0x6b4676bf, + 0x6b4a96b9, 0x6b4e76b7, 0x6bdbc3db, 0x6c53bc53, 0x6c5686ce, 0x6c5b76c7, + 0x6c5d86c8, 0x6cfca4fc, 0x73497346, 0x734a9734, 0x7367b67f, 0x7367c67d, + 0x7367fc67, 0x7387b8cb, 0x73967b67, 0x73a97aba, 0x73b67cb6, 0x73bdb97d, + 0x73c687c6, 0x73dcf8cf, 0x73febf9b, 0x767b627f, 0x76c61643, 
0x7845b784, + 0x78748654, 0x787b84cb, 0x787b8cb2, 0x797a7ba2, 0x79a7ba4b, 0x79bd7bd2, + 0x7a7ba4bf, 0x7a7bac4b, 0x7a7cad5c, 0x7a7cba5c, 0x7a7eba4b, 0x7a7fca5c, + 0x7bd74bdf, 0x7bd7bdf2, 0x7bd7ebd2, 0x7cb21571, 0x7ce75bce, 0x7ce75cfe, + 0x7ce7bce2, 0x7ce7ced2, 0x7ce7cfe2, 0x7ce7fe4b, 0x7e57e876, 0x7e5a7eba, + 0x7e785ea7, 0x7e78ce72, 0x7f517c51, 0x8468a68f, 0x8468c68e, 0x8468ea68, + 0x84768c68, 0x8498c9ac, 0x84a698a6, 0x84c68ac6, 0x84cfc78f, 0x84edce7c, + 0x84fae9ae, 0x868c628e, 0x86a61654, 0x87b8cb5c, 0x87cf8cf2, 0x8953c895, + 0x89859635, 0x898c95ac, 0x898c9ac2, 0x8ac21381, 0x8ad83aed, 0x8ad83cad, + 0x8ad8adf2, 0x8ad8aed2, 0x8ad8cad2, 0x8ad8ed5c, 0x8b8abf3a, 0x8b8acb3a, + 0x8b8cb5ce, 0x8b8cba5c, 0x8b8dcb5c, 0x8b8eab3a, 0x8cf85cfe, 0x8cf8cfe2, + 0x8cf8dcf2, 0x8d38d986, 0x8d3b8dcb, 0x8d893db8, 0x8d89ad82, 0x8e318a31, + 0x9569a69d, 0x9569b69e, 0x9569db69, 0x9579a7ba, 0x95869a69, 0x95a69ba6, + 0x95aea89e, 0x95b679b6, 0x95dfad8a, 0x95ebd7bd, 0x969a629d, 0x96b61635, + 0x98ae9ae2, 0x98c9ac3a, 0x9ae93aed, 0x9ae9aed2, 0x9ae9fae2, 0x9ba21491, + 0x9bf94abf, 0x9bf94bdf, 0x9bf9abf2, 0x9bf9bdf2, 0x9bf9bfe2, 0x9bf9df3a, + 0x9c9ac3ad, 0x9c9acb3a, 0x9c9bac4b, 0x9c9bce4b, 0x9c9dbc4b, 0x9c9fac3a, + 0x9d419b41, 0x9f49f796, 0x9f4c9fac, 0x9f974fc9, 0x9f97bf92, 0xa314ca34, + 0xa348ca34, 0xa72af52a, 0xa734ca34, 0xa73ca787, 0xa78ca782, 0xa7ba4b54, + 0xa7fe4fc4, 0xab45ab41, 0xab45ab84, 0xab894ab8, 0xaba8b298, 0xae349ae3, + 0xae9aeba2, 0xaeab5ea1, 0xaeab9e3a, 0xaf96a596, 0xb459ab45, 0xb82be32b, + 0xb8cb5c35, 0xb8ed5ea5, 0xbc53bc51, 0xbc53bc95, 0xbc975bc9, 0xbcb9c279, + 0xbd457bd4, 0xbd7bdcb2, 0xbdbc3db1, 0xbdbc7d4b, 0xbe76b376, 0xc537bc53, + 0xc92cd42c, 0xc9ac3a43, 0xc9df3db3, 0xcd86c486, 0xcf538cf5, 0xcf8cfac2, + 0xcfca4fc1, 0xcfca8f5c, 0xd38d3fc9, 0xd3a8d398, 0xd3bd73cb, 0xd457d4a7, + 0xd4adaf95, 0xd7bd7f5c, 0xe349e3c9, 0xe3cecd84, 0xe57e5db8, 0xe5ae95ba, + 0xe5c7e587, 0xe9ae9d4b, 0xf49f4ea7, 0xf4b9f479, 0xf4cf84ac, 0xf538f5b8, + 0xf5bfbe73, 0xf8cf8e3a, 0x13161b613f, 0x13161c613d, 0x13161c6143, + 0x13161fc613, 0x131813b8cb, 0x13218c2138, 0x135161b613, 0x13813cfc8f, + 0x139f913bf9, 0x13bc913bc9, 0x14161a614f, 0x14161a6154, 0x14161c614e, + 0x14161ea614, 0x141914c9ac, 0x14219a2149, 0x147e714ce7, 0x14914aea9e, + 0x14a71ca714, 0x15161a615d, 0x15161b615e, 0x15161db615, 0x151715a7ba, + 0x15217b2157, 0x15715bdb7d, 0x158d815ad8, 0x15ab815ab8, 0x16a61f516a, + 0x16b61e316b, 0x16c61d416c, 0x17bfe7bfeb, 0x17cfdfc7df, 0x18aefea8fe, + 0x18ced8cedc, 0x19adf9adfa, 0x19bdedb9ed, 0x21712fc217, 0x21812ea218, + 0x21912db219, 0x21a21812af, 0x21a21912ad, 0x21a21f912a, 0x21b21712bf, + 0x21b21912be, 0x21b21e712b, 0x21c21712cd, 0x21c21812ce, 0x21c21d812c, + 0x232c237387, 0x232c28d38d, 0x238fe38fe8, 0x239fdf93df, 0x23e32b29e3, + 0x242a248498, 0x242a29f49f, 0x247efe74fe, 0x249ed49ed9, 0x24d42c27d4, + 0x252b259579, 0x252b27e57e, 0x257df57df7, 0x258ded85ed, 0x25f52a28f5, + 0x2a212912ba, 0x2a624962a4, 0x2a8952a895, 0x2ac212812a, 0x2b212712cb, + 0x2b625762b5, 0x2b7329732b, 0x2c623862c3, 0x2c7842c784, 0x3132b9123b, + 0x3191d3bd31, 0x326239623d, 0x326286232f, 0x3262962432, 0x3262f96232, + 0x32b238b298, 0x3436b496b9, 0x3437b437cb, 0x3438468c68, 0x3439cb49cb, + 0x34a349f49f, 0x3526286232, 0x3537367b67, 0x353a3595ba, 0x353a68a63a, + 0x353ba38ba3, 0x359d3bd359, 0x36ce3686ce, 0x37349734c9, 0x37387cf8cf, + 0x3739f97bf9, 0x373bc97bc9, 0x3816c31681, 0x389c2389c2, 0x38bce38bce, + 0x38dfc3df8d, 0x38fe38fe86, 0x39e369b69e, 0x39e389eb8b, 0x39efb3e9f9, + 0x39fd3fd9f6, 0x3a13913aba, 0x3a35f538f5, 0x3a39538953, 0x3aefe8fe3e, + 0x3b239fb29f, 0x3b26932b6b, 
0x3bdbed39e3, 0x3bfe3bfe1b, 0x3cecde8d3e, + 0x3cfc238fc2, 0x3d36cfd3cf, 0x3d38d398c9, 0x3d3cfd31cf, 0x3e3181ce31, + 0x3e348ce348, 0x3e6bfbef3e, 0x3efebfe73e, 0x4142c7124c, 0x4171f4cf41, + 0x426247624f, 0x4262762542, 0x426296242e, 0x4262e76242, 0x42c249c279, + 0x437f4cf437, 0x4546c576c7, 0x4547ac57ac, 0x4548c548ac, 0x4549569a69, + 0x45b457e57e, 0x46ad4696ad, 0x479a7a24a2, 0x47d467c67d, 0x47d497dc9c, + 0x47dec4d7e7, 0x47ef4ef7e6, 0x48457845a7, 0x4847e78ce7, 0x48498ae9ae, + 0x484a78ca78, 0x4916a41691, 0x49cad49cad, 0x49ed49ed96, 0x49fea4fe9f, + 0x4adafd9f4d, 0x4aea249ea2, 0x4b14714bcb, 0x4b43e349e3, 0x4b47349734, + 0x4bded9ed4d, 0x4c247ec27e, 0x4c26742c6c, 0x4ced4ced1c, 0x4cfcdf47d4, + 0x4d4191ad41, 0x4d459ad459, 0x4d6cecde4d, 0x4dedced84d, 0x4f46aef4ae, + 0x4f49f479a7, 0x4f4aef41ae, 0x5152a8125a, 0x5181e5ae51, 0x526258625e, + 0x526276252d, 0x5262d86252, 0x52a257a287, 0x53c538d38d, 0x548e5ae548, + 0x56bf5676bf, 0x5716b51671, 0x578b2578b2, 0x57abf57abf, 0x57df57df76, + 0x57edb5ed7e, 0x58de5de8d6, 0x58f568a68f, 0x58f578fa7a, 0x58fda5f8d8, + 0x59538953b8, 0x59579bd7bd, 0x5958d89ad8, 0x595ab89ab8, 0x5a258da28d, + 0x5a26852a6a, 0x5adf5adf1a, 0x5aeafe58f5, 0x5bdb257db2, 0x5bfbef7e5f, + 0x5c15815cac, 0x5c54d457d4, 0x5c58457845, 0x5cfdf7df5f, 0x5df56adf5a, + 0x5e56bde5bd, 0x5e57e587b8, 0x5e5bde51bd, 0x5f5171bf51, 0x5f537bf537, + 0x5fdfadf95f, 0x62762f5267, 0x62862e3268, 0x62962d4269, 0x6762562687, + 0x676b456b47, 0x6796246267, 0x67cfc674fc, 0x6862362698, 0x686c536c58, + 0x68aea685ea, 0x696a346a39, 0x69bdb693db, 0x7121c21871, 0x73467c6734, + 0x76714c6174, 0x79121b2171, 0x7a78ca785c, 0x7a7cfca4fc, 0x7bc517bc51, + 0x7bdf57dfbd, 0x7bfe7bfeb2, 0x7ce7bce4b4, 0x7cef47ecfc, 0x7cfd7fdcf2, + 0x7d7bd7c5bc, 0x7e57e6b676, 0x7efe4fea7e, 0x8121a21981, 0x84568a6845, + 0x86815a6185, 0x8a318ca318, 0x8ad8cad5c5, 0x8ade58daea, 0x8aef8efae2, + 0x8b89ab893a, 0x8b8aeab5ea, 0x8ced8cedc2, 0x8cfe38fecf, 0x8d38d6c686, + 0x8ded5edb8d, 0x8f8cf8a3ca, 0x95369b6953, 0x96913b6193, 0x9ab419ab41, + 0x9adf9adfa2, 0x9aed49edae, 0x9bde9debd2, 0x9bf9abf3a3, 0x9bfd39fbdb, + 0x9c97bc974b, 0x9c9bdbc3db, 0x9e9ae9b4ab, 0x9f49f6a696, 0x9fdf3dfc9f, + 0xa616516ba6, 0xac616416a6, 0xb616316cb6, 0xd3adf9dfd3, 0xd3dfcdfd73, + 0xd7df5dfda7, 0xe5ced8ede5, 0xe5edbede95, 0xe9ed4edec9, 0xf4bfe7fef4, + 0xf4feafef84, 0xf8fe3fefb8, + }; + + map[42] = { + 0x16af, 0x176f, 0x1a7f, 0x316f, 0x321f, 0x362f, 0x62af, 0x6a3f, 0xa31f, + 0xa73f, 0xf21a, 0xf736, 0x14546a, 0x145476, 0x1454a7, 0x16abcb, + 0x16bcdb, 0x16cbec, 0x16dbdf, 0x16dbed, 0x16ecde, 0x16ecef, 0x176898, + 0x176ded, 0x1789b8, 0x1798c9, 0x17b8bf, 0x17b8cb, 0x17bcb6, 0x17c9bc, + 0x17c9cf, 0x186cad, 0x189ad8, 0x189b8a, 0x189d86, 0x189db8, 0x18dfc9, + 0x196bae, 0x198ae9, 0x198c9a, 0x198e96, 0x198ec9, 0x19efb8, 0x1a7898, + 0x1a7bcb, 0x1a7ded, 0x1ad8ed, 0x1ae9de, 0x1b8baf, 0x1b8cba, 0x1b8dbf, + 0x1b8dcb, 0x1b8fec, 0x1bae78, 0x1bd7cb, 0x1c9bca, 0x1c9caf, 0x1c9ebc, + 0x1c9ecf, 0x1c9fdb, 0x1cad79, 0x1ce7bc, 0x1d86df, 0x1d86ed, 0x1db8ed, + 0x1dbd7f, 0x1dbf9e, 0x1debd7, 0x1e96de, 0x1e96ef, 0x1ec9de, 0x1ece7f, + 0x1ecf8d, 0x1edce7, 0x1f8ad8, 0x1f9ae9, 0x21bcdb, 0x21bdbf, 0x21cbec, + 0x21cecf, 0x21dbed, 0x21deda, 0x21ecde, 0x23723f, 0x26726f, 0x2a4584, + 0x2a484f, 0x2a5495, 0x2a595f, 0x2a8498, 0x2a8981, 0x2a9589, 0x316898, + 0x316ded, 0x3189b8, 0x3198c9, 0x31b8bf, 0x31b8cb, 0x31c9bc, 0x31c9cf, + 0x321454, 0x321bcb, 0x321ded, 0x324584, 0x325495, 0x32848f, 0x328498, + 0x328981, 0x329589, 0x32959f, 0x34196b, 0x3456b4, 0x345846, 0x345b41, + 0x345b84, 0x34bf95, 0x35186c, 0x3546c5, 0x354956, 0x354c51, 0x354c95, + 
0x35cf84, 0x362454, 0x362898, 0x362bcb, 0x36b4cb, 0x36c5bc, 0x38486f, + 0x384986, 0x384b8f, 0x384b98, 0x384fc9, 0x386c24, 0x38b298, 0x395896, + 0x39596f, 0x395c89, 0x395c9f, 0x395fb8, 0x396b25, 0x39c289, 0x3b41bf, + 0x3b41cb, 0x3b84cb, 0x3b8b2f, 0x3b8f5c, 0x3bc8b2, 0x3c51bc, 0x3c51cf, + 0x3c95bc, 0x3c9c2f, 0x3c9f4b, 0x3cb9c2, 0x3ded62, 0x3f46b4, 0x3f56c5, + 0x416cad, 0x4196ad, 0x4196ba, 0x4196db, 0x41976d, 0x419a7d, 0x41c9ad, + 0x41dfc9, 0x421cad, 0x4542a1, 0x454316, 0x45462a, 0x4546a3, 0x454736, + 0x456ad4, 0x456b4a, 0x457d46, 0x45846a, 0x4584a7, 0x45a7d4, 0x45ad41, + 0x45ad84, 0x45b41a, 0x45b84a, 0x45d416, 0x45d421, 0x45d6b4, 0x45d846, + 0x45db41, 0x45db84, 0x462cad, 0x46ad4f, 0x46b4fa, 0x47d4f6, 0x487654, + 0x4a7d4f, 0x4ad41f, 0x4ad84f, 0x4adf95, 0x4b41fa, 0x4baf95, 0x4d416f, + 0x4d421f, 0x4d4f62, 0x4d5462, 0x4d6b4f, 0x4db84f, 0x4dbf95, 0x4dcb95, + 0x4df6c5, 0x4df956, 0x4dfc51, 0x4dfc95, 0x516bae, 0x5186ae, 0x5186ca, + 0x5186ec, 0x51876e, 0x518a7e, 0x51b8ae, 0x51efb8, 0x521bae, 0x546ae5, + 0x546c5a, 0x547e56, 0x54956a, 0x5495a7, 0x54a7e5, 0x54ae51, 0x54ae95, + 0x54c51a, 0x54c95a, 0x54e516, 0x54e521, 0x54e6c5, 0x54e956, 0x54ec51, + 0x54ec95, 0x562bae, 0x56ae5f, 0x56c5fa, 0x57e5f6, 0x597645, 0x5a7e5f, + 0x5ae51f, 0x5ae95f, 0x5aef84, 0x5c51fa, 0x5caf84, 0x5e4562, 0x5e516f, + 0x5e521f, 0x5e5f62, 0x5e6c5f, 0x5ebc84, 0x5ec95f, 0x5ecf84, 0x5ef6b4, + 0x5ef846, 0x5efb41, 0x5efb84, 0x62a898, 0x62abcb, 0x62bcdb, 0x62cbec, + 0x62dbdf, 0x62dbed, 0x62deda, 0x62ecde, 0x62ecef, 0x6898a3, 0x6a3ded, + 0x6ad4ed, 0x6ae5de, 0x6b4cba, 0x6b4dcb, 0x6b4fec, 0x6bae34, 0x6bcdb3, + 0x6c5bca, 0x6c5ebc, 0x6c5fdb, 0x6cad35, 0x6cbec3, 0x6d352b, 0x6d42ed, + 0x6db4ed, 0x6dbd3f, 0x6dbed3, 0x6dbf5e, 0x6e342c, 0x6e52de, 0x6ec5de, + 0x6ecde3, 0x6ece3f, 0x6ecf4d, 0x71271f, 0x7389b8, 0x738b8f, 0x7398c9, + 0x739c9f, 0x73b8cb, 0x73bcb6, 0x73c9bc, 0x76de4d, 0x76ed5e, 0x7a27af, + 0x846cad, 0x8486af, 0x84876f, 0x848fa7, 0x8498a7, 0x84ba98, 0x84d986, + 0x84db98, 0x84dfc9, 0x84edc9, 0x84fae9, 0x84fc9a, 0x84fe6c, 0x84fe96, + 0x84fec9, 0x86ae34, 0x86c24a, 0x86ca34, 0x86cad3, 0x86ec34, 0x876e34, + 0x89486a, 0x894876, 0x894ad8, 0x89816a, 0x898736, 0x898a31, 0x898a73, + 0x89a348, 0x89ad83, 0x89b8a3, 0x89d863, 0x89db83, 0x8a348f, 0x8a7e34, + 0x8ad8f3, 0x8b2a98, 0x8d3fc9, 0x8d86f3, 0x956bae, 0x9589a7, 0x9596af, + 0x95976f, 0x959fa7, 0x95ca89, 0x95deb8, 0x95e896, 0x95ec89, 0x95efb8, + 0x95fad8, 0x95fb8a, 0x95fd6b, 0x95fd86, 0x95fdb8, 0x96ad35, 0x96b25a, + 0x96ba35, 0x96bae3, 0x96db35, 0x976d35, 0x98596a, 0x985976, 0x985ae9, + 0x98a359, 0x98ae93, 0x98c9a3, 0x98e963, 0x98ec93, 0x9a359f, 0x9a7d35, + 0x9ae9f3, 0x9c2a89, 0x9e3fb8, 0x9e96f3, 0xa31454, 0xa31bcb, 0xa34584, + 0xa35495, 0xa73bcb, 0xa73ded, 0xa74543, 0xa7d4ed, 0xa7e5de, 0xad3518, + 0xad41ed, 0xad84ed, 0xad8ed3, 0xad8f5e, 0xae3419, 0xae51de, 0xae95de, + 0xae9de3, 0xae9f4d, 0xb2a518, 0xb32518, 0xb41dcb, 0xb41fec, 0xb84cba, + 0xb84dcb, 0xb84fec, 0xb8ae34, 0xb8b2af, 0xb8ba3f, 0xb8d3cb, 0xb8f5ae, + 0xb8f5ca, 0xb8f5ec, 0xb8fe3c, 0xba3518, 0xbae318, 0xbae341, 0xbae738, + 0xbc4ba1, 0xbc8b2a, 0xbc8ba3, 0xbcb21a, 0xbcb316, 0xbcb6a3, 0xbcdb31, + 0xbd73cb, 0xc2a419, 0xc32419, 0xc51ebc, 0xc51fdb, 0xc95bca, 0xc95ebc, + 0xc95fdb, 0xc9ad35, 0xc9c2af, 0xc9ca3f, 0xc9e3bc, 0xc9f4ad, 0xc9f4ba, + 0xc9f4db, 0xc9fd3b, 0xca3419, 0xcad319, 0xcad351, 0xcad739, 0xcb5ca1, + 0xcb9c2a, 0xcb9ca3, 0xcbec31, 0xce73bc, 0xd1796b, 0xd3196b, 0xd3516b, + 0xd35186, 0xd351b8, 0xd3521b, 0xd3bf95, 0xd416ed, 0xd421ed, 0xd7396b, + 0xd864ed, 0xd86f5e, 0xdb41ed, 0xdb84ed, 0xdb8ed3, 0xdb8f5e, 0xdbd31f, + 0xdbd73f, 0xdbf51e, 
0xdbf95e, 0xdbf9e3, 0xde8d36, 0xdebd31, 0xdebd73, + 0xded16a, 0xded763, 0xdeda31, 0xe1786c, 0xe3186c, 0xe3416c, 0xe34196, + 0xe341c9, 0xe3421c, 0xe3cf84, 0xe516de, 0xe521de, 0xe7386c, 0xe965de, + 0xe96f4d, 0xec51de, 0xec95de, 0xec9de3, 0xec9f4d, 0xece31f, 0xece73f, + 0xecf41d, 0xecf84d, 0xecf8d3, 0xed9e36, 0xedce31, 0xedce73, 0xf4ba84, + 0xf4d846, 0xf5ca95, 0xf5e956, 0xf8d3b8, 0xf9e3c9, 0xfb41db, 0xfc51ec, + 0x12712898, 0x12712ded, 0x1289d812, 0x128d812f, 0x1298e912, 0x129e912f, + 0x12bcb712, 0x12d812ed, 0x12e912de, 0x14139146, 0x1416ca34, 0x141914a7, + 0x1419a314, 0x142ae714, 0x142c7124, 0x14712914, 0x14914ae9, 0x14914c9a, + 0x14914e96, 0x14e7146c, 0x15138156, 0x1516ba35, 0x151815a7, 0x1518a315, + 0x152ad715, 0x152b7125, 0x15712815, 0x15815ad8, 0x15815b8a, 0x15815d86, + 0x15d7156b, 0x1686c6a8, 0x168c68ec, 0x1696b6a9, 0x169b69db, 0x16abeabe, + 0x16acdacd, 0x16bfbefb, 0x16cfcdfc, 0x17145b41, 0x17154c51, 0x171b41cb, + 0x171c51bc, 0x17419164, 0x17518165, 0x176b96b9, 0x176c86c8, 0x178151b8, + 0x1787e7b8, 0x178f8cf8, 0x179141c9, 0x1797d7c9, 0x179f9bf9, 0x17d7976d, + 0x17e7876e, 0x186c6d86, 0x189cd89c, 0x18b8cbec, 0x18f8aef8, 0x18f8cf8a, + 0x18f8ef86, 0x196b6e96, 0x198be98b, 0x19c9bcdb, 0x19f9adf9, 0x19f9bf9a, + 0x19f9df96, 0x1a7dacad, 0x1a7eabae, 0x1ad8acad, 0x1ae9abae, 0x1b8abeab, + 0x1bde9bde, 0x1bef9ebf, 0x1bf9bfdb, 0x1bfe7bfb, 0x1c9acdac, 0x1cdf8dcf, + 0x1ced8ced, 0x1cf8cfec, 0x1cfd7cfc, 0x1d797bd7, 0x1d797da7, 0x1e787ce7, + 0x1e787ea7, 0x1fdfc9fd, 0x1fefb8fe, 0x2142c24a, 0x214c24ec, 0x2152b25a, + 0x215b25db, 0x21bfbefb, 0x21cfcdfc, 0x21dacada, 0x21eabaea, 0x23289873, + 0x2328d398, 0x2329e389, 0x232d8d3f, 0x232de8d3, 0x232e9e3f, 0x232ed9e3, + 0x237bcb23, 0x242c2a84, 0x243e2384, 0x24546276, 0x248c24ec, 0x252b2a95, + 0x253d2395, 0x259b25db, 0x26289d86, 0x2628d86f, 0x26298e96, 0x2629e96f, + 0x262d86ed, 0x262ded76, 0x262e96de, 0x28986276, 0x28b2db8f, 0x28b2db98, + 0x29c2ec89, 0x29c2ec9f, 0x2a4f49f4, 0x2a5f58f5, 0x2a815181, 0x2a914191, + 0x2b6926db, 0x2c6826ec, 0x2d4284df, 0x2d4284ed, 0x2ded3273, 0x2e5295de, + 0x2e5295ef, 0x314914c9, 0x315815b8, 0x3168c68c, 0x3169b69b, 0x318f8cf8, + 0x319f9bf9, 0x32185185, 0x32194194, 0x3242c284, 0x324f49f4, 0x3252b295, + 0x325f58f5, 0x32737454, 0x32b2521b, 0x32c2421c, 0x32d3531d, 0x32d3573d, + 0x32d7397d, 0x32e3431e, 0x32e3473e, 0x32e7387e, 0x34191b41, 0x343e36b4, + 0x343e3846, 0x343e3b41, 0x3459b459, 0x345b4373, 0x348498c9, 0x34f46cf4, + 0x34f49f46, 0x34f4cf41, 0x35181c51, 0x353d36c5, 0x353d3956, 0x353d3c51, + 0x3548c548, 0x354c5373, 0x359589b8, 0x35f56bf5, 0x35f58f56, 0x35f5bf51, + 0x36243e34, 0x36253d35, 0x362b696b, 0x362c686c, 0x36b4696b, 0x36c5686c, + 0x374b437f, 0x375c537f, 0x37b437cb, 0x37c537bc, 0x38468c68, 0x38bc58bc, + 0x38cf5c8f, 0x38f58fb8, 0x38fc28f8, 0x39569b69, 0x39bf4b9f, 0x39cb49cb, + 0x39f49fc9, 0x39fb29f9, 0x3b23e318, 0x3b2528b2, 0x3b252b62, 0x3c23d319, + 0x3c2429c2, 0x3c242c62, 0x3d3196ad, 0x3d356a3d, 0x3d35a31d, 0x3d76c23d, + 0x3e3186ae, 0x3e346a3e, 0x3e34a31e, 0x3e76b23e, 0x3fbf95fb, 0x3fcf84fc, + 0x416c67d6, 0x4191ad41, 0x4191b41a, 0x4191d421, 0x419db414, 0x4237c243, + 0x428478fc, 0x42c2d421, 0x42cd4284, 0x42d42584, 0x434b8e34, 0x43796243, + 0x43e13436, 0x45471271, 0x4547a27a, 0x4578b784, 0x457ab47a, 0x459b459a, + 0x459d4259, 0x459d4596, 0x459d45c9, 0x459db459, 0x45ad4595, 0x45b6b476, + 0x45cd451c, 0x45db7d47, 0x462696ad, 0x462c242a, 0x46b4f676, 0x46bce4bc, + 0x473e3436, 0x4787b84f, 0x48478b98, 0x484798c9, 0x48498ae9, 0x48498c9a, + 0x48498e96, 0x48498ec9, 0x484bc78b, 0x48795248, 0x4914916a, 0x49f49f76, + 0x49f4e9f6, 0x49f4ec9f, 
0x49fca49f, 0x4a3e3473, 0x4ae349e3, 0x4b41cbec, + 0x4b84cbec, 0x4c249c2a, 0x4ca34739, 0x4d4546c5, 0x4d62c242, 0x4d8498c9, + 0x4dc249c2, 0x4dcfc84f, 0x4df49f46, 0x4e346ce3, 0x4e349e36, 0x4e34ce31, + 0x4e34ce73, 0x4ef421ef, 0x4ef84ef6, 0x4efb41ef, 0x4efb84ef, 0x4efe6b4f, + 0x4f46aef4, 0x4f46cf4a, 0x4f47ef46, 0x4f49f46a, 0x4f49f4a7, 0x4f4aef41, + 0x4f4aef84, 0x4f4cf41a, 0x4f4ef416, 0x4f9c279c, 0x4fe4f462, 0x4fecf84f, + 0x516b67e6, 0x5181ae51, 0x5181c51a, 0x5181e521, 0x518ec515, 0x5237b253, + 0x529579fb, 0x52b2e521, 0x52be5295, 0x52e52495, 0x535c9d35, 0x53786253, + 0x53d13536, 0x5479c795, 0x547ac57a, 0x548c548a, 0x548e5248, 0x548e5486, + 0x548e54b8, 0x548ec548, 0x54ae5484, 0x54be541b, 0x54c6c576, 0x54ec7e57, + 0x562686ae, 0x562b252a, 0x56c5f676, 0x56cbd5cb, 0x573d3536, 0x5797c95f, + 0x5815816a, 0x58f58f76, 0x58f5d8f6, 0x58f5db8f, 0x58fba58f, 0x595789b8, + 0x59579c89, 0x59589ad8, 0x59589b8a, 0x59589d86, 0x59589db8, 0x595cb79c, + 0x5a3d3573, 0x5ad358d3, 0x5b258b2a, 0x5ba35738, 0x5c51bcdb, 0x5c95bcdb, + 0x5d356bd3, 0x5d358d36, 0x5d35bd31, 0x5d35bd73, 0x5df521df, 0x5df95df6, + 0x5dfc51df, 0x5dfc95df, 0x5dfd6c5f, 0x5e456b4b, 0x5e62b252, 0x5e9589b8, + 0x5eb258b2, 0x5ebfb95f, 0x5ef58f56, 0x5f56adf5, 0x5f56bf5a, 0x5f57df56, + 0x5f58f56a, 0x5f58f5a7, 0x5f5adf51, 0x5f5adf95, 0x5f5bf51a, 0x5f5df516, + 0x5f8b278b, 0x5fd5f562, 0x5fdbf95f, 0x62767bcb, 0x6286c6a8, 0x6286c768, + 0x62876e78, 0x6296b6a9, 0x6296b769, 0x62976d79, 0x62adcadc, 0x62aebaeb, + 0x62b252db, 0x62bfbefb, 0x62c242ec, 0x62cfcdfc, 0x67bc4b67, 0x67cb5c67, + 0x68c685ca, 0x68c68ec3, 0x69b694ba, 0x69b69db3, 0x6abaea3b, 0x6abeab5e, + 0x6acada3c, 0x6acdac4d, 0x6b4abeab, 0x6bdbed5e, 0x6bfbefb3, 0x6c5acdac, + 0x6cecde4d, 0x6cfcdfc3, 0x6dfcdf4d, 0x6efbef5e, 0x6f4fecf4, 0x6f5fdbf5, + 0x7174b41f, 0x7175c51f, 0x738f8cf8, 0x739f9bf9, 0x73b696b6, 0x73c686c6, + 0x73d7976d, 0x73d97dc9, 0x73e7876e, 0x73e87eb8, 0x768e785e, 0x769d794d, + 0x7898a72a, 0x78be785e, 0x79cd794d, 0x7a7b4baf, 0x7a7bc4ba, 0x7a7c5caf, + 0x7a7cb5ca, 0x7bcba27a, 0x7bd47dbf, 0x7bd74dcb, 0x7ce57ecf, 0x7ce75ebc, + 0x7d4a7cad, 0x7e5a7bae, 0x812ca781, 0x818db518, 0x8467c687, 0x8478be78, + 0x84a7e787, 0x84d68c68, 0x84e78ce7, 0x84e78e76, 0x84f8cf8a, 0x8518e516, + 0x85f5ad8f, 0x86aea24a, 0x86c6d863, 0x8712e781, 0x87b82bfe, 0x894d2482, + 0x89cd89c3, 0x89dad82a, 0x8a7aca34, 0x8a7e7873, 0x8ad8fa2a, 0x8ade58de, + 0x8b28bcdb, 0x8b28dbed, 0x8b2c978b, 0x8b2ce8bc, 0x8b8cb5ca, 0x8b8cb5ec, + 0x8b8cbec3, 0x8c68c6a3, 0x8cf85cfa, 0x8cf85ecf, 0x8cf8cf2a, 0x8cfe38cf, + 0x8d86ed5e, 0x8db8ed5e, 0x8e312c81, 0x8e78ce73, 0x8f5efb8f, 0x8f8a35f8, + 0x8f8aef83, 0x8f8cf8a3, 0x8f8ef863, 0x8fce72ce, 0x912ba791, 0x919ec419, + 0x9419d416, 0x94f4ae9f, 0x9567b697, 0x9579cd79, 0x95a7d797, 0x95d79bd7, + 0x95d79d76, 0x95e69b69, 0x95f9bf9a, 0x96ada25a, 0x96b6e963, 0x9712d791, + 0x97c92cfd, 0x985e2592, 0x98be98b3, 0x98eae92a, 0x9a7aba35, 0x9a7d7973, + 0x9ae9fa2a, 0x9aed49ed, 0x9b69b6a3, 0x9bf94bfa, 0x9bf94dbf, 0x9bf9bf2a, + 0x9bfd39bf, 0x9c29cbec, 0x9c29ecde, 0x9c2bd9cb, 0x9c9bc4ba, 0x9c9bc4db, + 0x9c9bcdb3, 0x9d312b91, 0x9d79bd73, 0x9e96de4d, 0x9ec9de4d, 0x9f4dfc9f, + 0x9f9a34f9, 0x9f9adf93, 0x9f9bf9a3, 0x9f9df963, 0x9fbd72bd, 0xa27a2ded, + 0xa2de8da2, 0xa2ed9ea2, 0xa348e34e, 0xa359d35d, 0xa7b2a52b, 0xa7bae2ab, + 0xa7baea3b, 0xa7c2a42c, 0xa7cad2ac, 0xa7cada3c, 0xa7d4797d, 0xa7dfd5fd, + 0xa7e5787e, 0xa7efe4fe, 0xabeab5e1, 0xabeab9e3, 0xacdac4d1, 0xacdac8d3, + 0xaf8f5ef8, 0xaf9f4df9, 0xb2562b76, 0xb258b2db, 0xb2db7df5, 0xb41abeab, + 0xb6b4d96b, 0xb84abeab, 0xb8a2eab2, 0xb8fbefb3, 0xb96b596a, 0xbd7bde4d, + 0xbd7e5bde, 0xbd7ec2bd, 
0xbdbed5e1, 0xbdbed95e, 0xbdbed9e3, 0xbeabea31, + 0xbef51bef, 0xbefb9ef3, 0xbefbef73, 0xbfbefb31, 0xbfe527e5, 0xc2462c76, + 0xc249c2ec, 0xc2ec7ef4, 0xc51acdac, 0xc6c5e86c, 0xc86c486a, 0xc95acdac, + 0xc9a2dac2, 0xc9fcdfc3, 0xcdacda31, 0xcdf41cdf, 0xcdfc8df3, 0xcdfcdf73, + 0xce7ced5e, 0xce7d4ced, 0xcecde4d1, 0xcecde84d, 0xcecde8d3, 0xcfcdfc31, + 0xcfd427d4, 0xd35b8d3d, 0xd425e7d4, 0xd427d4f9, 0xd79a7d2a, 0xd79bd74d, + 0xdad84cad, 0xdbf51fdf, 0xdcad9ca3, 0xdf597259, 0xe34c9e3e, 0xe527e5f8, + 0xe78a7e2a, 0xe78ce75e, 0xeae95bae, 0xebae8ba3, 0xecf41fef, 0xef487248, + 0x124914e912, 0x125815d812, 0x128f8ef812, 0x129f9df912, 0x141361c614, + 0x141712e714, 0x141e714ce7, 0x1461c614ec, 0x151361b615, 0x151712d715, + 0x151d715bd7, 0x1561b615db, 0x1714f4cf41, 0x1715f5bf51, 0x1b676be76b, + 0x1c676cd76c, 0x2142a2ea24, 0x2152a2da25, 0x21b21912db, 0x21c21812ec, + 0x2328fe38f8, 0x2329fd39f9, 0x234e3429e3, 0x235d3528d3, 0x242a2ea284, + 0x242cd427d4, 0x252a2da295, 0x252be527e5, 0x2628f8ef86, 0x2629f9df96, + 0x26b96b2e96, 0x26c86c2d86, 0x281832e318, 0x28b278be78, 0x28bfe2b8fb, + 0x291932d319, 0x29c279cd79, 0x29cfd2c9fc, 0x2ab212912b, 0x2ac212812c, + 0x2b258b278b, 0x2c249c279c, 0x2d427d497d, 0x2d4f924df4, 0x2dad6296ad, + 0x2e527e587e, 0x2e5f825ef5, 0x2eae6286ae, 0x2f4d724d24, 0x2f5e725e25, + 0x2fb872b82b, 0x2fc972c92c, 0x31813e31b8, 0x31913d31c9, 0x34b7e34e37, + 0x34f4cf4373, 0x35c7d35d37, 0x35f5bf5373, 0x381218c218, 0x391219b219, + 0x3d319a313d, 0x3d3237c23d, 0x3d3c23d9c2, 0x3e318a313e, 0x3e3237b23e, + 0x3e3b23e8b2, 0x41467c6174, 0x41471e7164, 0x416cd41614, 0x419c1d419c, + 0x424d4257d4, 0x42624962a4, 0x4262962432, 0x4262962d42, 0x42842784f2, + 0x42c7842784, 0x4327397343, 0x43a31ca343, 0x43a348ca34, 0x43ca349ca3, + 0x4528478424, 0x462ae42a24, 0x47397343c9, 0x473ac7a434, 0x4787f84cf8, + 0x4846c686ec, 0x48498e98b8, 0x49fbefb49f, 0x4a37937434, 0x4cf97f9c4f, + 0x4cfdfc7d4f, 0x4d4548c548, 0x4f47acf47a, 0x4f842ef482, 0x4f97bf94fb, + 0x51567b6175, 0x51571d7165, 0x516be51615, 0x518b1e518b, 0x525e5247e5, + 0x52625862a5, 0x5262862532, 0x5262862e52, 0x52952795f2, 0x52b7952795, + 0x5327387353, 0x53a31ba353, 0x53a359ba35, 0x53ba358ba3, 0x5429579525, + 0x562ad52a25, 0x57387353b8, 0x573ab7a535, 0x5797f95bf9, 0x58fcdfc58f, + 0x5956b696db, 0x59589d89c9, 0x5a37837535, 0x5bf87f8b5f, 0x5bfefb7e5f, + 0x5e5459b459, 0x5f57abf57a, 0x5f87cf85fc, 0x5f952df592, 0x674fc674f4, + 0x675fb675f5, 0x678c685c67, 0x679b694b67, 0x6861a6ea68, 0x6862562768, + 0x68a6ea5e68, 0x6961a6da69, 0x6962462769, 0x69a6da4d69, 0x7185187c51, + 0x7194197b41, 0x73d767c67d, 0x73e767b67e, 0x7687357378, 0x7697347379, + 0x76d76c674d, 0x76e76b675e, 0x7842784e78, 0x7952795d79, 0x7adcad75ca, + 0x7aebae74ba, 0x7bd72bd52b, 0x7bdf57dbfd, 0x7ce72ce42c, 0x7cef47ecfe, + 0x7d79bd72bd, 0x7e78ce72ce, 0x7fdb27db7d, 0x7fec27ec7e, 0x81316e3181, + 0x81721c2181, 0x81e318ce31, 0x8468a6ea68, 0x847aca78a7, 0x8784284798, + 0x87a78ca738, 0x87a7ca7817, 0x897b82b878, 0x8b8aeaba5e, 0x8b8cb5cbdb, + 0x8efc2fce8f, 0x8fc2dfc8fd, 0x91316d3191, 0x91721b2191, 0x91d319bd31, + 0x9569a6da69, 0x957aba79a7, 0x9795295789, 0x97a79ba739, 0x97a7ba7917, + 0x987c92c979, 0x9c9adaca4d, 0x9c9bc4bcec, 0x9dfb2fbd9f, 0x9fb2efb9fe, + 0xa28fea28f8, 0xa29fda29f9, 0xa2beab9ea2, 0xa2cdac8da2, 0xaba356a3ab, + 0xaba79a72ab, 0xaca346a3ac, 0xaca78a72ac, 0xb232be321b, 0xb232e32b62, + 0xb2b87b82cb, 0xb616a516b6, 0xb6276e76b6, 0xbc2db7db2b, 0xbd5359bd35, + 0xbdbed9ed4d, 0xbf4efb7ef4, 0xc232cd321c, 0xc232d32c62, 0xc2c97c92bc, + 0xc616a416c6, 0xc6276d76c6, 0xcb2ec7ec2c, 0xce4348ce34, 0xcecde8de5e, + 0xcf5dfc7df5, 0xd717517da7, 
0xd7db2db7ed, 0xda6a396ada, 0xda72a52ada, + 0xde74d24d7d, 0xdf528f5df8, 0xe717417ea7, 0xe7ec2ec7de, 0xea6a386aea, + 0xea72a42aea, 0xed75e25e7e, 0xef429f4ef9, 0x14161a6ea614, + 0x141714be7141, 0x1417a71ca714, 0x14b914be914b, 0x15161a6da615, + 0x151715cd7151, 0x1517a71ba715, 0x15c815cd815c, 0x1812cd812181, + 0x1912be912191, 0x2181a2aea218, 0x2191a2ada219, 0x232b2e32b9e3, + 0x232c2d32c8d3, 0x246249624e96, 0x24d429ed49ed, 0x256258625d86, + 0x25e528de58de, 0x28b298be98be, 0x29c289cd89cd, 0x31d31c613dc6, + 0x31e31b613eb6, 0x34379b437343, 0x343e93eb43e9, 0x35378c537353, + 0x353d83dc53d8, 0x3d326239623d, 0x3e326238623e, 0x436a96a3a439, + 0x437367c67343, 0x45c8457845c8, 0x47ecb4ecb7ec, 0x4842984e9842, + 0x49cb49cb79c7, 0x4a24ea249ea2, 0x4b46b496be96, 0x4d454c54d7d4, + 0x536a86a3a538, 0x537367b67353, 0x54b9547954b9, 0x57dbc5dbc7db, + 0x58bc58bc78b7, 0x5952895d8952, 0x5a25da258da2, 0x5c56c586cd86, + 0x5e545b45e7e5, 0x6b67e4b676b6, 0x6c67d5c676c6, 0x78a78ca785ca, + 0x79a79ba794ba, 0x813a31ca3181, 0x8ced8ced2ce2, 0x8d8ad8cad5ca, + 0x913a31ba3191, 0x9bde9bde2bd2, 0x9e9ae9bae4ba, + }; + + map[222] = { + 0x1af, 0x1232f, 0x1454a, 0x1676f, 0x1898a, 0x1abcb, 0x1aded, 0x1bcdb, + 0x1bdbf, 0x1cbec, 0x1cecf, 0x1dbed, 0x1ecde, 0x2362f, 0x3273f, 0x4584a, + 0x5495a, 0x6276f, 0x7367f, 0x8498a, 0x9589a, 0xa262f, 0xa373f, 0xa484f, + 0xa595f, 0x121712f, 0x1232898, 0x1232ded, 0x1234542, 0x1245d42, + 0x124d42f, 0x1254e52, 0x125e52f, 0x12bcb32, 0x131613f, 0x1345b43, + 0x134b43f, 0x1354c53, 0x135c53f, 0x141914a, 0x1423c24, 0x142c24a, + 0x1432e34, 0x143e34a, 0x1454676, 0x1454cec, 0x14bdb54, 0x151815a, + 0x1523b25, 0x152b25a, 0x1532d35, 0x153d35a, 0x1676ded, 0x1678986, + 0x1689d86, 0x168d86f, 0x1698e96, 0x169e96f, 0x16bcb76, 0x1789b87, + 0x178b87f, 0x1798c97, 0x179c97f, 0x1867c68, 0x186c68a, 0x1876e78, + 0x187e78a, 0x1898cec, 0x18bdb98, 0x1967b69, 0x196b69a, 0x1976d79, + 0x197d79a, 0x1abaeab, 0x1acadac, 0x1b252db, 0x1b434cb, 0x1b696db, + 0x1b878cb, 0x1befbef, 0x1c242ec, 0x1c535bc, 0x1c686ec, 0x1c979bc, + 0x1cdfcdf, 0x1d353bd, 0x1d424ed, 0x1d797bd, 0x1d868ed, 0x1e343ce, + 0x1e525de, 0x1e787ce, 0x1e969de, 0x2324584, 0x2325495, 0x232848f, + 0x2328498, 0x2329589, 0x232959f, 0x234196b, 0x2345462, 0x235186c, + 0x2362bcb, 0x2389862, 0x238b28f, 0x238b298, 0x239c289, 0x239c29f, + 0x23b8cb2, 0x23c9bc2, 0x23ded62, 0x245462a, 0x245d462, 0x24d4f62, + 0x24dfc95, 0x254e562, 0x25e5f62, 0x25efb84, 0x262a898, 0x262bcdb, + 0x262cbec, 0x262dbdf, 0x262dbed, 0x262deda, 0x262ecde, 0x262ecef, + 0x26bae34, 0x26cad35, 0x26d42ed, 0x26e52de, 0x27a27af, 0x28b2a8f, + 0x28b2a98, 0x29c2a89, 0x29c2a9f, 0x2b8f5ec, 0x2bc8b2a, 0x2bcb62a, + 0x2c9f4db, 0x2cb9c2a, 0x324197d, 0x3245473, 0x325187e, 0x3273ded, + 0x3289873, 0x328d38f, 0x328d398, 0x329e389, 0x329e39f, 0x32bcb73, + 0x32d8ed3, 0x32e9de3, 0x345473a, 0x345b473, 0x34b4f73, 0x34bfe95, + 0x354c573, 0x35c5f73, 0x35cfd84, 0x36a36af, 0x373a898, 0x373bcba, + 0x373bdbf, 0x373bdcb, 0x373cebc, 0x373cecf, 0x373dbed, 0x373ecde, + 0x37b43cb, 0x37c53bc, 0x37dac24, 0x37eab25, 0x38d3a8f, 0x38d3a98, + 0x39e3a89, 0x39e3a9f, 0x3d8f5ce, 0x3de8d3a, 0x3ded73a, 0x3e9f4bd, + 0x3ed9e3a, 0x423c284, 0x42c2a84, 0x432e384, 0x43e3a84, 0x452178b, + 0x453168d, 0x4546276, 0x4547367, 0x4567684, 0x456b46a, 0x456b476, + 0x457d467, 0x457d47a, 0x4584bdb, 0x45b6db4, 0x45cec84, 0x45d7bd4, + 0x467684f, 0x46b4f6a, 0x46b4f76, 0x47d4f67, 0x47d4f7a, 0x484abcb, + 0x484aded, 0x484bcdb, 0x484cbec, 0x484cecf, 0x484dbed, 0x484edce, + 0x48c24ec, 0x48e34ce, 0x49f49fa, 0x4b6a3ed, 0x4bd6b4f, 0x4bdb84f, + 0x4d7a2cb, 0x4db7d4f, 0x523b295, 0x52b2a95, 0x532d395, 
0x53d3a95, + 0x542179c, 0x543169e, 0x5467695, 0x546c56a, 0x546c576, 0x547e567, + 0x547e57a, 0x5495cec, 0x54bdb95, 0x54c6ec5, 0x54e7ce5, 0x567695f, + 0x56c5f6a, 0x56c5f76, 0x57e5f67, 0x57e5f7a, 0x58f58fa, 0x595abcb, + 0x595aded, 0x595bcdb, 0x595bdbf, 0x595cbec, 0x595debd, 0x595ecde, + 0x59b25db, 0x59d35bd, 0x5c6a3de, 0x5ce6c5f, 0x5cec95f, 0x5e7a2bc, + 0x5ec7e5f, 0x6276898, 0x6276bcb, 0x6289d86, 0x628d86f, 0x6298e96, + 0x629e96f, 0x62bae78, 0x62cad79, 0x62d86ed, 0x62ded76, 0x62e96de, + 0x6768498, 0x6769589, 0x678152b, 0x679142c, 0x67b4cb6, 0x67c5bc6, + 0x68dfc59, 0x69efb48, 0x6b4f9ec, 0x6bc4b6a, 0x6c5f8db, 0x6cb5c6a, + 0x7367898, 0x7367ded, 0x7389b87, 0x738b87f, 0x7398c97, 0x739c97f, + 0x73b87cb, 0x73bcb67, 0x73c97bc, 0x73dac68, 0x73eab69, 0x768153d, + 0x769143e, 0x76d4ed7, 0x76e5de7, 0x78bfe59, 0x79cfd48, 0x7d4f9ce, + 0x7de4d7a, 0x7e5f8bd, 0x7ed5e7a, 0x8467c68, 0x846c68a, 0x8476e78, + 0x847e78a, 0x8498bdb, 0x84c68ec, 0x84cec98, 0x84e78ce, 0x896134b, + 0x897124d, 0x89b2db8, 0x89d3bd8, 0x8b2a7ed, 0x8bd2b8f, 0x8d3a6cb, + 0x8db3d8f, 0x9567b69, 0x956b69a, 0x9576d79, 0x957d79a, 0x9589cec, + 0x95b69db, 0x95bdb89, 0x95d79bd, 0x986135c, 0x987125e, 0x98c2ec9, + 0x98e3ce9, 0x9c2a7de, 0x9ce2c9f, 0x9e3a6bc, 0x9ec3e9f, 0xab2562b, + 0xab6296b, 0xac2462c, 0xac6286c, 0xad3573d, 0xad7397d, 0xae3473e, + 0xae7387e, 0xb2562db, 0xb4384cb, 0xb4834bf, 0xb6296db, 0xb8478bf, + 0xb8478cb, 0xc2462ec, 0xc5395bc, 0xc5935cf, 0xc6286ec, 0xc9579bc, + 0xc9579cf, 0xd3573bd, 0xd4284ed, 0xd4824df, 0xd7397bd, 0xd8468df, + 0xd8468ed, 0xe3473ce, 0xe5295de, 0xe5925ef, 0xe7387ce, 0xe9569de, + 0xe9569ef, 0x12189d812, 0x12198e912, 0x121d812ed, 0x121d8df12, + 0x121ded712, 0x121e912de, 0x121e9ef12, 0x123249149, 0x123258158, + 0x124191d42, 0x1242c2d42, 0x124efe4f2, 0x125181e52, 0x1252b2e52, + 0x125dfd5f2, 0x128157128, 0x128712e78, 0x129147129, 0x129712d79, + 0x12bcb7127, 0x13189b813, 0x13198c913, 0x131b813cb, 0x131b8bf13, + 0x131bcb613, 0x131c913bc, 0x131c9cf13, 0x134191b43, 0x1343e3b43, + 0x134cfc4f3, 0x135181c53, 0x1353d3c53, 0x135bfb5f3, 0x138156138, + 0x138613c68, 0x139146139, 0x139613b69, 0x13ded6136, 0x14167c614, + 0x14176e714, 0x141c614ec, 0x141c6ca14, 0x141cec914, 0x141e714ce, + 0x141e7ea14, 0x142171c24, 0x142a2ea24, 0x143161e34, 0x143a3ca34, + 0x145427127, 0x145436136, 0x146914e96, 0x147914c97, 0x14bdb9149, + 0x15167b615, 0x15176d715, 0x151b615db, 0x151b6ba15, 0x151bdb815, + 0x151d715bd, 0x151d7da15, 0x152171b25, 0x152a2da25, 0x153161d35, + 0x153a3ba35, 0x156815d86, 0x157815b87, 0x15cec8158, 0x16145d416, + 0x16154e516, 0x161d416ed, 0x161d4df16, 0x161e516de, 0x161e5ef16, + 0x167641914, 0x167651815, 0x1686c6d86, 0x168efe8f6, 0x1696b6e96, + 0x169dfd9f6, 0x17145b417, 0x17154c517, 0x171b417cb, 0x171b4bf17, + 0x171c517bc, 0x171c5cf17, 0x1787e7b87, 0x178cfc8f7, 0x1797d7c97, + 0x179bfb9f7, 0x18123c218, 0x18132e318, 0x181c218ec, 0x181c2ca18, + 0x181e318ce, 0x181e3ea18, 0x186a6ea68, 0x187a7ca78, 0x189821712, + 0x189831613, 0x19123b219, 0x19132d319, 0x191b219db, 0x191b2ba19, + 0x191d319bd, 0x191d3da19, 0x196a6da69, 0x197a7ba79, 0x232b2562b, + 0x232b6296b, 0x232c2462c, 0x232c6286c, 0x232d3573d, 0x232d7397d, + 0x232e3473e, 0x232e7387e, 0x232f49f49, 0x232f58f58, 0x234c249c2, + 0x235b258b2, 0x238fc28f8, 0x239fb29f9, 0x23d62535d, 0x23d76c23d, + 0x23e62434e, 0x23e76b23e, 0x24237c243, 0x242c26d42, 0x242cd4284, + 0x243796243, 0x24547a27a, 0x2459d4259, 0x248795248, 0x24c249c2a, + 0x24d542484, 0x24dc249c2, 0x24f4ef462, 0x25237b253, 0x252b26e52, + 0x252be5295, 0x253786253, 0x2548e5248, 0x25b258b2a, 0x25e452595, + 0x25eb258b2, 0x25f5df562, 
0x26286c768, 0x262876e78, 0x26296b769, + 0x262976d79, 0x262abaeab, 0x262acadac, 0x262fbefbe, 0x262fcdfcd, + 0x27a27a898, 0x27a27abcb, 0x27a27aded, 0x28b28bcdb, 0x28b28dbed, + 0x28b2ce8bc, 0x28fc2a8f8, 0x29c29cbec, 0x29c29ecde, 0x29c2bd9cb, + 0x29fb2a9f9, 0x2a28da28f, 0x2a28da298, 0x2a29ea289, 0x2a29ea29f, + 0x2a7b2a52b, 0x2a7bae2ab, 0x2a7c2a42c, 0x2a7cad2ac, 0x2ade8da2a, + 0x2aed9ea2a, 0x2b2562b76, 0x2b258b2db, 0x2b8ab2eab, 0x2bd7ec2bd, + 0x2c2462c76, 0x2c249c2ec, 0x2c9ac2dac, 0x2d42d8498, 0x2d7a297da, + 0x2e52e9589, 0x2e7a287ea, 0x324e349e3, 0x325d358d3, 0x328fe38f8, + 0x329fd39f9, 0x343e37b43, 0x343eb4384, 0x34546a36a, 0x3459b4359, + 0x348695348, 0x34b543484, 0x34be349e3, 0x34e349e3a, 0x34f4cf473, + 0x353d37c53, 0x353dc5395, 0x3548c5348, 0x35c453595, 0x35cd358d3, + 0x35d358d3a, 0x35f5bf573, 0x36a36a898, 0x36a36abcb, 0x36a36aded, + 0x373867c68, 0x37387e678, 0x373967b69, 0x37397d679, 0x373abeabe, + 0x373acdacd, 0x373fbfefb, 0x373fcfdfc, 0x38d38bdcb, 0x38d38debd, + 0x38d3ec8de, 0x38fe3a8f8, 0x39e39cebc, 0x39e39edce, 0x39e3db9ed, + 0x39fd3a9f9, 0x3a38ba38f, 0x3a38ba398, 0x3a39ca389, 0x3a39ca39f, + 0x3a6d3a53d, 0x3a6dac3ad, 0x3a6e3a43e, 0x3a6eab3ae, 0x3abc8ba3a, + 0x3acb9ca3a, 0x3b43b8498, 0x3b6a396ba, 0x3c53c9589, 0x3c6a386ca, + 0x3d3573d67, 0x3d358d3bd, 0x3d8ad3cad, 0x3db6ce3db, 0x3e3473e67, + 0x3e349e3ce, 0x3e9ae3bae, 0x42a2ea284, 0x42cd427d4, 0x42d4257d4, + 0x42d427d4f, 0x43a3ca384, 0x43b4356b4, 0x43b436b4f, 0x43eb436b4, + 0x454b8478b, 0x454c9579c, 0x454d8468d, 0x454e9569e, 0x456ad46a6, + 0x457ab47a7, 0x45c98d45c, 0x45e98b45e, 0x46ad4f6a6, 0x46b46bdcb, + 0x46b46cbec, 0x46b4de6bd, 0x47ab4f7a7, 0x47d47dbed, 0x47d47edce, + 0x47d4bc7db, 0x48468d986, 0x484698e96, 0x48478b987, 0x484798c97, + 0x484abaeab, 0x484acadac, 0x484fbfefb, 0x484fcdfcd, 0x49f46769f, + 0x49f49cecf, 0x49f49fbdb, 0x4a6cfc64f, 0x4a7efe74f, 0x4b436b4cb, + 0x4b6fb4efb, 0x4bc9ed4bc, 0x4c9f479cf, 0x4cfd9f4cf, 0x4d427d4ed, + 0x4d7fd4cfd, 0x4e9f469ef, 0x4efb9f4ef, 0x4f46cf476, 0x4f47ef467, + 0x4f9b4f34b, 0x4f9d4f24d, 0x4fce6cf4f, 0x4fec7ef4f, 0x52a2da295, + 0x52be527e5, 0x52e5247e5, 0x52e527e5f, 0x53a3ba395, 0x53c5346c5, + 0x53c536c5f, 0x53dc536c5, 0x546ae56a6, 0x547ac57a7, 0x56ae5f6a6, + 0x56c56bcdb, 0x56c56cebc, 0x56c5ed6ce, 0x57ac5f7a7, 0x57e57debd, + 0x57e57ecde, 0x57e5cb7ec, 0x58f56768f, 0x58f58bdbf, 0x58f58fcec, + 0x595689d86, 0x59569e896, 0x595789b87, 0x59579c897, 0x595abaeab, + 0x595acadac, 0x595fbefbe, 0x595fcfdfc, 0x5a6bfb65f, 0x5a7dfd75f, + 0x5b8f578bf, 0x5bfe8f5bf, 0x5c536c5bc, 0x5c6fc5dfc, 0x5cb8de5cb, + 0x5d8f568df, 0x5dfc8f5df, 0x5e527e5de, 0x5e7fe5bfe, 0x5f56bf576, + 0x5f57df567, 0x5f8c5f35c, 0x5f8e5f25e, 0x5fbd6bf5f, 0x5fdb7df5f, + 0x6286c6d86, 0x628f8ef86, 0x6296b6e96, 0x629f9df96, 0x678c685c6, + 0x679b694b6, 0x686cd8648, 0x68c685c6a, 0x68dc685c6, 0x696be9659, + 0x69b694b6a, 0x69eb694b6, 0x6ade4da6a, 0x6aed5ea6a, 0x6b46bd96b, + 0x6b4ab6eab, 0x6c56ce86c, 0x6c5ac6dac, 0x7387e7b87, 0x738f8cf87, + 0x7397d7c97, 0x739f9bf97, 0x768e785e7, 0x769d794d7, 0x787eb8748, + 0x78be785e7, 0x78e785e7a, 0x797dc9759, 0x79cd794d7, 0x79d794d7a, + 0x7abc4ba7a, 0x7acb5ca7a, 0x7d47db97d, 0x7d4ad7cad, 0x7e57ec87e, + 0x7e5ae7bae, 0x846a6ea68, 0x847a7ca78, 0x86cd863d8, 0x86d863d8f, + 0x86d8693d8, 0x87b872b8f, 0x87b8792b8, 0x87eb872b8, 0x8b28bc78b, + 0x8b2fb8efb, 0x8d38de68d, 0x8d3fd8cfd, 0x8fce2cf8f, 0x8fec3ef8f, + 0x956a6da69, 0x957a7ba79, 0x96be963e9, 0x96e963e9f, 0x96e9683e9, + 0x97c972c9f, 0x97c9782c9, 0x97dc972c9, 0x9c29cb79c, 0x9c2fc9dfc, + 0x9e39ed69e, 0x9e3fe9bfe, 0x9fbd2bf9f, 0x9fdb3df9f, 0xaba3573ab, + 0xaba7397ab, 0xaca3473ac, 
0xaca7387ac, 0xada2562ad, 0xada6296ad, + 0xaea2462ae, 0xaea6286ae, 0xb258b278b, 0xb436b496b, 0xbf5935fbf, + 0xbf9579fbf, 0xc249c279c, 0xc536c586c, 0xcf4834fcf, 0xcf8478fcf, + 0xd358d368d, 0xd427d497d, 0xdf5925fdf, 0xdf9569fdf, 0xe349e369e, + 0xe527e587e, 0xef4824fef, 0xef8468fef, 0x1214914e912, + 0x1215815d812, 0x1218ef812ef, 0x1219df912df, 0x1314914c913, + 0x1315815b813, 0x1318cf813cf, 0x1319bf913bf, 0x1412712e714, + 0x1413613c614, 0x1416a6ea614, 0x1417a7ca714, 0x1512712d715, + 0x1513613b615, 0x1516a6da615, 0x1517a7ba715, 0x1614d914916, + 0x1614ef416ef, 0x1615df516df, 0x1615e815816, 0x1714b914917, + 0x1714cf417cf, 0x1715bf517bf, 0x1715c815817, 0x1812a2ea218, + 0x1812c712718, 0x1813a3ca318, 0x1813e613618, 0x1912a2da219, + 0x1912b712719, 0x1913a3ba319, 0x1913d613619, 0x1b2b32be32b, + 0x1b4b54be54b, 0x1b6b76be76b, 0x1b8b98be98b, 0x1c2c32cd32c, + 0x1c5c45cd45c, 0x1c6c76cd76c, 0x1c9c89cd89c, 0x23426249624, + 0x23526258625, 0x23b23e38b2b, 0x23c23d39c2c, 0x24262962d42, + 0x242842784f2, 0x24284784254, 0x242c7842784, 0x25262862e52, + 0x252952795f2, 0x25295795245, 0x252b7952795, 0x26b232be32b, + 0x26c232cd32c, 0x27842784e78, 0x27952795d79, 0x27db27db97d, + 0x27ec27ec87e, 0x2a28fea28f8, 0x2a29fda29f9, 0x2abeab9ea2a, + 0x2acdac8da2a, 0x2b257db27db, 0x2b2db27dbf2, 0x2b2db7db2cb, + 0x2c247ec27ec, 0x2c2ec27ecf2, 0x2c2ec7ec2bc, 0x32437349734, + 0x32537358735, 0x32d32c28d3d, 0x32e32b29e3e, 0x34373973b43, + 0x343843684f3, 0x34384684354, 0x343e6843684, 0x35373873c53, + 0x353953695f3, 0x35395695345, 0x353d6953695, 0x36843684c68, + 0x36953695b69, 0x36bd36bd96b, 0x36ce36ce86c, 0x37d323dc23d, + 0x37e323eb23e, 0x3a38fca38f8, 0x3a39fba39f9, 0x3adcad9ca3a, + 0x3aebae8ba3a, 0x3d356bd36bd, 0x3d3bd36bdf3, 0x3d3bd6bd3ed, + 0x3e346ce36ce, 0x3e3ce36cef3, 0x3e3ce6ce3de, 0x424624962a4, + 0x434734973a4, 0x45b45e56b4b, 0x45d45c57d4d, 0x46249624e96, + 0x47349734c97, 0x48b454be54b, 0x48d454dc54d, 0x49cb49cb79c, + 0x49ed49ed69e, 0x4a6aef4efa6, 0x4a7acf4cfa7, 0x4abc94bc4b4, + 0x4ade94de4d4, 0x4b439cb49cb, 0x4b4cb9cb4db, 0x4d429ed49ed, + 0x4d4ed9ed4bd, 0x4fbefb7ef4f, 0x4fdcfd6cf4f, 0x525625862a5, + 0x535735873a5, 0x54c54d46c5c, 0x54e54b47e5e, 0x56258625d86, + 0x57358735b87, 0x58bc58bc78b, 0x58de58de68d, 0x59c545cd45c, + 0x59e545eb45e, 0x5a6adf5dfa6, 0x5a7abf5bfa7, 0x5acb85cb5c5, + 0x5aed85ed5e5, 0x5c538bc58bc, 0x5c5bc8bc5ec, 0x5e528de58de, + 0x5e5de8de5ce, 0x5fcdfc7df5f, 0x5febfe6bf5f, 0x62b676be76b, + 0x62c676cd76c, 0x67862562868, 0x67962462969, 0x67b67e74b6b, + 0x67c67d75c6c, 0x68648348698, 0x69659359689, 0x6abeab5ea6a, + 0x6acdac4da6a, 0x6b6db3db6cb, 0x6c6ec3ec6bc, 0x73d767dc67d, + 0x73e767eb67e, 0x76873573878, 0x76973473979, 0x76d76c64d7d, + 0x76e76b65e7e, 0x78748248798, 0x79759259789, 0x7adcad5ca7a, + 0x7aebae4ba7a, 0x7d7bd2bd7ed, 0x7e7ce2ce7de, 0x84b898be98b, + 0x84d898dc98d, 0x89b89e92b8b, 0x89d89c93d8d, 0x8b8cb5cb8db, + 0x8d8ed5ed8bd, 0x8fbefb3ef8f, 0x8fdcfd2cf8f, 0x95c989cd89c, + 0x95e989eb89e, 0x98c98d82c9c, 0x98e98b83e9e, 0x9c9bc4bc9ec, + 0x9e9de4de9ce, 0x9fcdfc3df9f, 0x9febfe2bf9f, 0xaba27a297ab, + 0xaba356a36ab, 0xaca27a287ac, 0xaca346a36ac, 0xada257a27ad, + 0xada36a396ad, 0xaea247a27ae, 0xaea36a386ae, 0xbf49f479fbf, + 0xbf85f835fbf, 0xcf58f578fcf, 0xcf94f934fcf, 0xdf49f469fdf, + 0xdf85f825fdf, 0xef58f568fef, 0xef94f924fef, + }; + // clang-format on + return map; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp new file mode 100644 index 0000000000..ba5d662c9e --- /dev/null +++ 
b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.cpp @@ -0,0 +1,170 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TokenSwapping/VertexMapResizing.hpp" + +#include +#include + +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +VertexMapResizing::VertexMapResizing(NeighboursInterface& neighbours) + : m_neighbours(neighbours) {} + +const vector& VertexMapResizing::operator()(size_t vertex) { + const auto citer = m_cached_neighbours.find(vertex); + if (citer != m_cached_neighbours.cend()) { + return citer->second; + } + auto& list = m_cached_neighbours[vertex]; + list = m_neighbours(vertex); + for (auto other_v : list) { + m_cached_full_edges.insert(get_swap(vertex, other_v)); + } + return list; +} + +const VertexMapResizing::Result& VertexMapResizing::resize_mapping( + VertexMapping& mapping, unsigned desired_size) { + m_result.success = false; + m_result.edges.clear(); + if (mapping.size() > desired_size) { + for (auto infinite_loop_guard = 1 + mapping.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = mapping.size(); + remove_vertex(mapping); + const auto new_size = mapping.size(); + if (new_size <= desired_size) { + fill_result_edges(mapping); + m_result.success = true; + return m_result; + } + if (old_size <= new_size) { + return m_result; + } + } + TKET_ASSERT(!"VertexMapResizing::resize_mapping"); + } + TKET_ASSERT(mapping.size() <= desired_size); + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + desired_size; infinite_loop_guard > 0; + --infinite_loop_guard) { + const auto old_size = mapping.size(); + if (old_size >= desired_size) { + terminated_correctly = true; + break; + } + add_vertex(mapping); + const auto new_size = mapping.size(); + if (old_size == new_size) { + // Couldn't add a vertex. + terminated_correctly = true; + break; + } + // Must have added exactly one vertex. + TKET_ASSERT(old_size + 1 == new_size); + } + TKET_ASSERT(terminated_correctly); + // It's acceptable to have too few vertices, + // it can still be looked up in the table. + m_result.success = true; + fill_result_edges(mapping); + return m_result; +} + +size_t VertexMapResizing::get_edge_count( + const VertexMapping& mapping, size_t vertex) { + const auto& neighbours = operator()(vertex); + return std::count_if( + neighbours.cbegin(), neighbours.cend(), + // Note that "neighbours" automatically will not contain "vertex" itself. + [&mapping](size_t vertex) { return mapping.count(vertex) != 0; }); +} + +void VertexMapResizing::add_vertex(VertexMapping& mapping) { + std::set new_vertices; + + // Multipass, maybe a bit inefficient, but doesn't matter. + // After a few calls, it's just map lookup so not so bad. + for (const auto& existing_vertex_pair : mapping) { + // A valid mapping should have the same source/target vertices, + // so don't need to consider .second. 
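The neighbour-counting step in get_edge_count above can be pictured with plain std containers; the following is an illustrative standalone sketch (std::map and std::vector stand in for the internal VertexMapping and neighbour types), not the tket API itself.

#include <algorithm>
#include <cstddef>
#include <map>
#include <vector>

// Count how many neighbours of a vertex already occur in the mapping, i.e.
// how many edges the vertex would contribute to the mapped subgraph.
static std::size_t edges_into_mapping(
    const std::map<std::size_t, std::size_t>& mapping,
    const std::vector<std::size_t>& neighbours) {
  return static_cast<std::size_t>(std::count_if(
      neighbours.cbegin(), neighbours.cend(),
      [&mapping](std::size_t v) { return mapping.count(v) != 0; }));
}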
+ const auto& neighbours = operator()(existing_vertex_pair.first); + for (auto vv : neighbours) { + if (mapping.count(vv) == 0) { + new_vertices.insert(vv); + } + } + } + + // Now find the new vertex which would add the largest number of new edges. + size_t maximum_new_edges = 0; + size_t best_new_vertex = std::numeric_limits::max(); + + for (auto new_v : new_vertices) { + const auto edge_count = get_edge_count(mapping, new_v); + if (edge_count > maximum_new_edges) { + best_new_vertex = new_v; + maximum_new_edges = edge_count; + } + } + if (maximum_new_edges > 0) { + mapping[best_new_vertex] = best_new_vertex; + } +} + +void VertexMapResizing::remove_vertex(VertexMapping& mapping) { + const auto invalid_number_of_edges = std::numeric_limits::max(); + + // We want to leave as many edges as possible, + // so we remove the minimum number. + size_t minimum_edges_removed = invalid_number_of_edges; + size_t best_vertex = std::numeric_limits::max(); + for (const auto& existing_vertex_pair : mapping) { + if (existing_vertex_pair.first != existing_vertex_pair.second) { + // The vertex is not fixed, so we cannot remove it. + continue; + } + const auto edge_count = get_edge_count(mapping, existing_vertex_pair.first); + if (edge_count < minimum_edges_removed) { + best_vertex = existing_vertex_pair.first; + minimum_edges_removed = edge_count; + } + } + if (minimum_edges_removed < invalid_number_of_edges) { + TKET_ASSERT(mapping.at(best_vertex) == best_vertex); + TKET_ASSERT(mapping.erase(best_vertex) == 1); + } +} + +void VertexMapResizing::fill_result_edges(const VertexMapping& mapping) { + m_result.edges.clear(); + for (auto citer1 = mapping.cbegin(); citer1 != mapping.cend(); ++citer1) { + auto citer2 = citer1; + for (++citer2; citer2 != mapping.cend(); ++citer2) { + const auto edge = get_swap(citer1->first, citer2->first); + if (m_cached_full_edges.count(edge) != 0) { + m_result.edges.push_back(edge); + } + } + } +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp new file mode 100644 index 0000000000..201ad7e539 --- /dev/null +++ b/tket/src/TokenSwapping/TableLookup/VertexMapResizing.hpp @@ -0,0 +1,120 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include + +#include "../VertexMappingFunctions.hpp" +#include "TokenSwapping/NeighboursInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** If a vertex mapping { u -> v } has too few vertices, try to add extra + * vertices, fixed by the new mapping, to get to the desired size. This may + * allow extra optimisations to be found in the table. E.g., imagine a vertex in + * a graph which is not moved by the mapping. Imagine that removing it makes the + * graph disconnected. 
If the desired mapping moves a token + * between different components, it is then impossible for any swap + * sequence within the subgraph to perform that mapping. + * However, adding the vertex back makes it possible. + * + * If instead there are too many vertices to look up in the table, it tries + * to remove vertices which are fixed by the mapping to get it down to size. + */ +class VertexMapResizing : public NeighboursInterface { + public: + /** Store a Neighbours object, to be used throughout when required to find + * all neighbours of a given vertex. The caller must ensure that the + * object remains valid. + * @param neighbours The object to calculate neighbours of a vertex. + */ + explicit VertexMapResizing(NeighboursInterface& neighbours); + + /** Gets the data by calling the NeighboursInterface object which was passed + * into the constructor. HOWEVER, it does internal caching, so doesn't call it + * multiple times. + * @param vertex A vertex in the graph. + * @return A cached list of neighbours of that vertex, stored internally. + */ + virtual const std::vector<size_t>& operator()(size_t vertex) override; + + /** The result of resizing a mapping by deleting fixed vertices if too big, + * or adding new vertices if too small. + */ + struct Result { + /** It is still a success if we have fewer vertices than the desired number + * (as this can still be looked up in the table). However, it's a failure if + * there are too many vertices (which then cannot be looked up). + */ + bool success; + + /** If successful, the edges of the subgraph containing only the vertices in + * the new mapping. */ + std::vector<Swap> edges; + }; + + /** The mapping may be altered, even upon failure, so obviously the caller + * should make a copy if it needs to be preserved. Increase the map size as + * much as possible if too small (still a success even if it cannot reach the + * size). Decrease the size if too large (and not reaching the size is then a + * failure). Newly added or removed vertices are all fixed, i.e. map[v]=v. + * @param mapping The mapping which will be altered and returned by reference. + * @param desired_size The size we wish to reach, or as close as possible if + * the mapping is currently too small. + */ + const Result& resize_mapping( + VertexMapping& mapping, unsigned desired_size = 6); + + private: + NeighboursInterface& m_neighbours; + Result m_result; + + // KEY: a vertex. VALUE: all its neighbours. + std::map<size_t, std::vector<size_t>> m_cached_neighbours; + std::set<Swap> m_cached_full_edges; + + /** How many edges join the given vertex to other existing vertices? + * @param mapping The current vertex permutation which we may expand or + * contract. + * @param vertex A vertex which may or may not be already within the mapping. + * @return The total number of edges within the LARGER graph joining the + * vertex to other vertices within the mapping. + */ + size_t get_edge_count(const VertexMapping& mapping, size_t vertex); + + /** Try to add a single new fixed vertex to the mapping, i.e. a new v with + * map[v]=v. + * @param mapping The current vertex permutation which we wish to expand by + * one vertex. + */ + void add_vertex(VertexMapping& mapping); + + /** Try to remove a single vertex within the mapping, but only if it is fixed, + * i.e. map[v]==v. + * @param mapping The current vertex permutation which we wish to shrink by + * one vertex. + */ + void remove_vertex(VertexMapping& mapping); + + /** Within the m_result object, fill "edges" for the new mapping.
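As a rough illustration of the edge-filling step documented above, here is a standalone sketch that enumerates unordered pairs of mapped vertices and keeps those recorded as edges; Edge is a hypothetical stand-in pair type, not the internal Swap type.

#include <cstddef>
#include <map>
#include <set>
#include <utility>
#include <vector>

using Edge = std::pair<std::size_t, std::size_t>;  // assumed ordered: first < second

static std::vector<Edge> edges_within_mapping(
    const std::map<std::size_t, std::size_t>& mapping,
    const std::set<Edge>& known_edges) {
  std::vector<Edge> result;
  for (auto it1 = mapping.cbegin(); it1 != mapping.cend(); ++it1) {
    auto it2 = it1;
    for (++it2; it2 != mapping.cend(); ++it2) {
      // Map keys are visited in increasing order, so the pair is ordered.
      const Edge edge{it1->first, it2->first};
      if (known_edges.count(edge) != 0) {
        result.push_back(edge);
      }
    }
  }
  return result;
}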
*/ + void fill_result_edges(const VertexMapping& mapping); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/TrivialTSA.cpp b/tket/src/TokenSwapping/TrivialTSA.cpp new file mode 100644 index 0000000000..a2053de242 --- /dev/null +++ b/tket/src/TokenSwapping/TrivialTSA.cpp @@ -0,0 +1,311 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TrivialTSA.hpp" + +#include +#include + +#include "CyclicShiftCostEstimate.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" +#include "Utils/Assert.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { + +// Make an arrow from each nonempty vertex to its target; +// what are the connected components of the resulting directed graph? +// Two different arrows cannot point INTO the same vertex. +// So, EITHER a cycle (so, a, abstract cyclic shift on tokens is performed), +// OR a path, with all except the final vertex being nonempty. +// In either case, we enact a cyclic shift. + +// To find a component, we might have to go backwards along arrows +// as well as forwards. + +TrivialTSA::TrivialTSA(Options options) : m_options(options) { + m_name = "Trivial"; +} + +void TrivialTSA::set(Options options) { m_options = options; } + +bool TrivialTSA::grow_cycle_forwards( + const VertexMapping& vertex_mapping, Endpoints& endpoints) { + auto current_id = endpoints.first; + const auto start_vertex = m_abstract_cycles_vertices.at(current_id); + + // If valid, a single cycle contains at most one empty vertex. + // Thus there are at most N+1 vertices. + for (size_t infin_loop_guard = vertex_mapping.size() + 1; + infin_loop_guard != 0; --infin_loop_guard) { + const auto v1 = m_abstract_cycles_vertices.at(current_id); + const auto citer = vertex_mapping.find(v1); + if (citer == vertex_mapping.cend()) { + // We end at an empty vertex. + endpoints.second = current_id; + return false; + } + if (citer->second == start_vertex) { + // We've hit the start. + endpoints.second = current_id; + return true; + } + current_id = m_abstract_cycles_vertices.insert_after(current_id); + m_abstract_cycles_vertices.at(current_id) = citer->second; + } + TKET_ASSERT(!"TrivialTSA::grow_cycle_forwards: " + "hit vertex count limit; invalid vertex mapping"); + return false; +} + +void TrivialTSA::grow_cycle_backwards(Endpoints& endpoints) { + auto current_id = endpoints.first; + + // In a valid cycle, every vertex but one (the empty vertex) + // is the target of something, and therefore there are <= N+1 vertices. + for (size_t infin_loop_guard = m_reversed_vertex_mapping.size() + 1; + infin_loop_guard != 0; --infin_loop_guard) { + const auto v1 = m_abstract_cycles_vertices.at(current_id); + const auto citer = m_reversed_vertex_mapping.find(v1); + if (citer == m_reversed_vertex_mapping.cend()) { + // Our vertex is not the target of anything. + // So, it's the START. 
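The cycle decomposition sketched in the comments above can be written independently of the tket types; here a permutation is a std::map from vertex to target, and each component is traced by following targets until returning to the start. This illustrative sketch assumes a total permutation, so it ignores the empty-vertex/path case handled by grow_cycle_backwards.

#include <cstddef>
#include <map>
#include <set>
#include <vector>

// Decompose a permutation into disjoint cycles by following the arrows
// v -> permutation[v] until each walk returns to its starting vertex.
static std::vector<std::vector<std::size_t>> disjoint_cycles(
    const std::map<std::size_t, std::size_t>& permutation) {
  std::vector<std::vector<std::size_t>> cycles;
  std::set<std::size_t> seen;
  for (const auto& entry : permutation) {
    if (seen.count(entry.first) != 0) {
      continue;
    }
    std::vector<std::size_t> cycle;
    std::size_t v = entry.first;
    do {
      cycle.push_back(v);
      seen.insert(v);
      v = permutation.at(v);
    } while (v != entry.first);
    cycles.push_back(cycle);
  }
  return cycles;
}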
+ endpoints.first = current_id; + return; + } + // Remember the reverse order! + current_id = m_abstract_cycles_vertices.insert_before(current_id); + m_abstract_cycles_vertices.at(current_id) = citer->second; + } + TKET_ASSERT(!"TrivialTSA::grow_cycle_backwards: " + "hit vertex count limit; invalid vertex mapping"); +} + +void TrivialTSA::do_final_checks() const { + m_vertices_seen.clear(); + for (const auto& entry : m_reversed_vertex_mapping) { + m_vertices_seen.insert(entry.first); + m_vertices_seen.insert(entry.second); + } + TKET_ASSERT(m_vertices_seen.size() == m_abstract_cycles_vertices.size()); + + // Erase them again...! + for (const auto& endpoints : m_cycle_endpoints) { + for (auto id = endpoints.first;; + id = m_abstract_cycles_vertices.next(id).value()) { + // GCOVR_EXCL_START + TKET_ASSERT( + m_vertices_seen.erase(m_abstract_cycles_vertices.at(id)) == 1); + // GCOVR_EXCL_STOP + if (id == endpoints.second) { + break; + } + } + } + TKET_ASSERT(m_vertices_seen.empty()); +} + +void TrivialTSA::fill_disjoint_abstract_cycles( + const VertexMapping& vertex_mapping) { + m_vertices_seen.clear(); + m_abstract_cycles_vertices.clear(); + m_cycle_endpoints.clear(); + m_reversed_vertex_mapping = get_reversed_map(vertex_mapping); + Endpoints endpoints; + + // Get the disjoint abstract cycles. + for (const auto& entry : vertex_mapping) { + if (m_vertices_seen.count(entry.first) != 0) { + continue; + } + m_abstract_cycles_vertices.push_back(entry.first); + endpoints.first = m_abstract_cycles_vertices.back_id().value(); + if (!grow_cycle_forwards(vertex_mapping, endpoints)) { + grow_cycle_backwards(endpoints); + } + m_cycle_endpoints.push_back(endpoints); + + // Now, add the vertices to vertices seen... + for (auto id = endpoints.first;; + id = m_abstract_cycles_vertices.next(id).value()) { + // GCOVR_EXCL_START + TKET_ASSERT( + m_vertices_seen.insert(m_abstract_cycles_vertices.at(id)).second); + // GCOVR_EXCL_STOP + if (id == endpoints.second) { + break; + } + } + } +} + +void TrivialTSA::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& /*not needed*/, + RiverFlowPathFinder& path_finder) { + append_partial_solution(swaps, vertex_mapping, distances, path_finder); +} + +void TrivialTSA::append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, RiverFlowPathFinder& path_finder) { + if (all_tokens_home(vertex_mapping)) { + return; + } + fill_disjoint_abstract_cycles(vertex_mapping); + do_final_checks(); + + if (m_options == Options::FULL_TSA) { + // OK, below, for a single cycle, we use CyclicShiftCostEstimate + // to estimate, not ONLY the cheapest single cycle, but ALSO + // the start vertex to enact it most cheaply. + // We could do that here also and it might save a bit, + // BUT the full Trivial TSA is really only used for testing now + // so don't bother. 
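fill_disjoint_abstract_cycles above relies on the reversed vertex mapping; a minimal sketch of that reversal with a plain std::map (valid because a permutation sends at most one source to each target) might look like this. It is only a stand-in for get_reversed_map, whose exact signature is defined elsewhere.

#include <cstddef>
#include <map>

// Reverse a (source -> target) permutation into (target -> source).
static std::map<std::size_t, std::size_t> reversed_map(
    const std::map<std::size_t, std::size_t>& mapping) {
  std::map<std::size_t, std::size_t> reversed;
  for (const auto& entry : mapping) {
    reversed[entry.second] = entry.first;
  }
  return reversed;
}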
+ append_partial_solution_with_all_cycles(swaps, vertex_mapping, path_finder); + return; + } + TKET_ASSERT(m_options == Options::BREAK_AFTER_PROGRESS); + // We're only going to do ONE cycle; so find which cycle + // has the shortest estimated number of swaps + size_t best_estimated_concrete_swaps = std::numeric_limits::max(); + Endpoints best_endpoints; + size_t start_v_index = std::numeric_limits::max(); + + for (const auto& endpoints : m_cycle_endpoints) { + copy_vertices_to_work_vector(endpoints); + if (m_vertices_work_vector.size() < 2) { + TKET_ASSERT(m_vertices_work_vector.size() == 1); + continue; + } + const CyclicShiftCostEstimate estimate(m_vertices_work_vector, distances); + // GCOVR_EXCL_START + TKET_ASSERT( + estimate.estimated_concrete_swaps < std::numeric_limits::max()); + TKET_ASSERT(estimate.start_v_index < m_vertices_work_vector.size()); + // GCOVR_EXCL_STOP + if (estimate.estimated_concrete_swaps < best_estimated_concrete_swaps) { + best_estimated_concrete_swaps = estimate.estimated_concrete_swaps; + start_v_index = estimate.start_v_index; + best_endpoints = endpoints; + } + } + // GCOVR_EXCL_START + TKET_ASSERT( + best_estimated_concrete_swaps < std::numeric_limits::max()); + // GCOVR_EXCL_STOP + const auto swap_size_before = swaps.size(); + const auto decrease = append_partial_solution_with_single_cycle( + best_endpoints, start_v_index, swaps, vertex_mapping, distances, + path_finder); + TKET_ASSERT(swap_size_before < swaps.size()); + TKET_ASSERT(decrease > 0); +} + +void TrivialTSA::copy_vertices_to_work_vector(const Endpoints& endpoints) { + m_vertices_work_vector.clear(); + for (auto id = endpoints.first;; + id = m_abstract_cycles_vertices.next(id).value()) { + m_vertices_work_vector.push_back(m_abstract_cycles_vertices.at(id)); + if (id == endpoints.second) { + break; + } + } +} + +void TrivialTSA::append_partial_solution_with_all_cycles( + SwapList& swaps, VertexMapping& vertex_mapping, + RiverFlowPathFinder& path_finder) { + for (const auto& endpoints : m_cycle_endpoints) { + copy_vertices_to_work_vector(endpoints); + if (m_vertices_work_vector.size() < 2) { + continue; + } + // Break the abstract cycle into abstract swaps... + // To shift: [a,b,c,d] -> [d,a,b,c], we do abstract swaps in + // opposite order of the shift direction, i.e. cd bc ab + for (size_t ii = m_vertices_work_vector.size() - 1; ii > 0; --ii) { + // Abstract swap(v1, v2). + const auto v1 = m_vertices_work_vector[ii]; + const auto v2 = m_vertices_work_vector[ii - 1]; + TKET_ASSERT(v1 != v2); + const auto& path = path_finder(v1, v2); + TKET_ASSERT(path.size() >= 2); + append_swaps_to_interchange_path_ends(path, vertex_mapping, swaps); + } + } +} + +size_t TrivialTSA::append_partial_solution_with_single_cycle( + const Endpoints& endpoints, size_t start_v_index, SwapList& swaps, + VertexMapping& vertex_mapping, DistancesInterface& distances, + RiverFlowPathFinder& path_finder) { + copy_vertices_to_work_vector(endpoints); + TKET_ASSERT(m_vertices_work_vector.size() >= 2); + TKET_ASSERT(start_v_index < m_vertices_work_vector.size()); + + // Can go negative! But MUST be >= 1 at the end + // (otherwise this cycle was useless and should never have occurred). + int current_L_decrease = 0; + + // To shift: [a,b,c,d] -> [d,a,b,c], we do abstract swaps in the opposite + // order to the shift direction, i.e. cd bc ab + for (size_t ii = m_vertices_work_vector.size() - 1; ii > 0; --ii) { + // Abstract swap(v1, v2). 
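The comment above about breaking a cyclic shift into swaps can be checked with a tiny self-contained example: applying adjacent swaps in reverse order (cd, bc, ab) turns [a,b,c,d] into [d,a,b,c].

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

int main() {
  std::vector<char> tokens{'a', 'b', 'c', 'd'};
  // Swap adjacent positions in reverse order: cd, bc, ab.
  for (std::size_t ii = tokens.size() - 1; ii > 0; --ii) {
    std::swap(tokens[ii], tokens[ii - 1]);
  }
  assert((tokens == std::vector<char>{'d', 'a', 'b', 'c'}));
  return 0;
}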
+ const auto v1 = m_vertices_work_vector + [(ii + start_v_index) % m_vertices_work_vector.size()]; + + const auto v2 = m_vertices_work_vector + [((ii - 1) + start_v_index) % m_vertices_work_vector.size()]; + + TKET_ASSERT(v1 != v2); + const auto& path = path_finder(v1, v2); + TKET_ASSERT(path.size() >= 2); + + // e.g., to swap endpoints: [x,a,b,c,y] -> [y,a,b,c,x], + // do concrete swaps xa ab bc cy bc ab xa. + + // xa ab bc cy ...(ascending) + for (size_t jj = 1; jj < path.size(); ++jj) { + current_L_decrease += + get_swap_decrease(vertex_mapping, path[jj], path[jj - 1], distances); + + VertexSwapResult(path[jj], path[jj - 1], vertex_mapping, swaps); + if (current_L_decrease > 0) { + return static_cast<size_t>(current_L_decrease); + } + } + // Now the reverse: bc ab xa + for (size_t kk = path.size() - 2; kk > 0; --kk) { + current_L_decrease += + get_swap_decrease(vertex_mapping, path[kk], path[kk - 1], distances); + + VertexSwapResult(path[kk], path[kk - 1], vertex_mapping, swaps); + if (current_L_decrease > 0) { + return static_cast<size_t>(current_L_decrease); + } + } + } + // The cycle MUST have decreased L overall, + // otherwise we shouldn't have done it. + TKET_ASSERT(!"TrivialTSA::append_partial_solution_with_single_cycle"); + return 0; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp new file mode 100644 index 0000000000..940c3e2bbc --- /dev/null +++ b/tket/src/TokenSwapping/VectorListHybridSkeleton.cpp @@ -0,0 +1,311 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "VectorListHybridSkeleton.hpp" + +#include +#include +#include + +#include "Utils/Assert.hpp" + +namespace tket { +namespace tsa_internal { + +using Index = VectorListHybridSkeleton::Index; + +const Index INVALID_INDEX = std::numeric_limits<Index>::max(); + +Index VectorListHybridSkeleton::get_invalid_index() { return INVALID_INDEX; } + +VectorListHybridSkeleton::VectorListHybridSkeleton() + : m_size(0), + m_front(INVALID_INDEX), + m_back(INVALID_INDEX), + m_deleted_front(INVALID_INDEX) {} + +void VectorListHybridSkeleton::clear() { + if (m_links.empty()) { + TKET_ASSERT(m_size == 0); + TKET_ASSERT(m_front == INVALID_INDEX); + TKET_ASSERT(m_back == INVALID_INDEX); + TKET_ASSERT(m_deleted_front == INVALID_INDEX); + return; + } + m_size = 0; + m_front = INVALID_INDEX; + m_back = INVALID_INDEX; + for (Index nn = 1; nn < m_links.size(); ++nn) { + // Not strictly necessary, as deleted links are only a forward list; + // but make absolutely sure no leakage of prior internal link data can + // occur. + m_links[nn].previous = nn - 1; + m_links[nn - 1].next = nn; + } + m_links[0].previous = INVALID_INDEX; + m_links.back().next = INVALID_INDEX; + m_deleted_front = 0; +} + +void VectorListHybridSkeleton::fast_clear() { + if (m_back == INVALID_INDEX) { + // No elements stored currently; nothing to do.
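Similarly, the endpoint-interchange pattern noted above (xa ab bc cy bc ab xa on the path [x,a,b,c,y]) can be verified standalone: a forward sweep of adjacent swaps followed by a backward sweep exchanges the two endpoints and leaves the intermediate tokens where they started.

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

int main() {
  std::vector<char> path_tokens{'x', 'a', 'b', 'c', 'y'};
  // Forward sweep: xa ab bc cy.
  for (std::size_t jj = 1; jj < path_tokens.size(); ++jj) {
    std::swap(path_tokens[jj], path_tokens[jj - 1]);
  }
  // Backward sweep, excluding the last pair: bc ab xa.
  for (std::size_t kk = path_tokens.size() - 2; kk > 0; --kk) {
    std::swap(path_tokens[kk], path_tokens[kk - 1]);
  }
  assert((path_tokens == std::vector<char>{'y', 'a', 'b', 'c', 'x'}));
  return 0;
}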
+ TKET_ASSERT(m_size == 0); + TKET_ASSERT(m_front == INVALID_INDEX); + return; + } + TKET_ASSERT(m_size > 0); + TKET_ASSERT(m_front != INVALID_INDEX); + TKET_ASSERT(m_links[m_back].next == INVALID_INDEX); + // There are some existing elements. + // Recall that deleted elements are ONLY a forward list, + // so we don't need to update "previous". + // To combine existing active elements with + // existing deleted elements, + // the valid elements will be joined to + // the start of the deleted list. + if (m_deleted_front != INVALID_INDEX) { + m_links[m_back].next = m_deleted_front; + } + // Convert "active" elements into deleted elements. + m_deleted_front = m_front; + m_front = INVALID_INDEX; + m_back = INVALID_INDEX; + m_size = 0; +} + +void VectorListHybridSkeleton::reverse() { + if (m_size <= 1) { + // Nothing to do. + return; + } + TKET_ASSERT(m_front != INVALID_INDEX); + TKET_ASSERT(m_back != INVALID_INDEX); + TKET_ASSERT(m_front != m_back); + // The deleted element links don't need to change. + { + auto current_index = m_front; + bool terminated_correctly = false; + for (auto infinite_loop_guard = 1 + m_links.size(); infinite_loop_guard > 0; + --infinite_loop_guard) { + auto& link = m_links[current_index]; + const auto next_index = link.next; + std::swap(link.next, link.previous); + if (next_index >= m_links.size()) { + TKET_ASSERT(next_index == INVALID_INDEX); + terminated_correctly = true; + break; + } + current_index = next_index; + } + TKET_ASSERT(terminated_correctly); + } + std::swap(m_front, m_back); +} + +size_t VectorListHybridSkeleton::size() const { return m_size; } + +Index VectorListHybridSkeleton::front_index() const { return m_front; } + +Index VectorListHybridSkeleton::back_index() const { return m_back; } + +Index VectorListHybridSkeleton::next(Index index) const { + return m_links[index].next; +} + +Index VectorListHybridSkeleton::previous(Index index) const { + return m_links[index].previous; +} + +void VectorListHybridSkeleton::erase(Index index) { + --m_size; + auto& current_link = m_links[index]; + if (current_link.previous == INVALID_INDEX) { + // We're erasing the front. + m_front = current_link.next; + } else { + m_links[current_link.previous].next = current_link.next; + } + if (current_link.next == INVALID_INDEX) { + // We're erasing the back. + m_back = current_link.previous; + } else { + m_links[current_link.next].previous = current_link.previous; + } + // Recall: deleted elements are a forward list ONLY. + current_link.next = m_deleted_front; + m_deleted_front = index; +} + +void VectorListHybridSkeleton::erase_interval( + Index index, size_t number_of_elements) { + if (number_of_elements == 0) { + return; + } + // First, find the index of the LAST element to be erased. + // Notice that this is the only O(N) part; the rest are O(1). + // We update only O(1) links in total, not O(N), + // so slightly faster than a loop of next/erase calls. 
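The reverse() trick above (swap each node's next/previous links, then swap front and back) can be sketched with plain arrays standing in for the link storage; this simplified illustration assumes there are no deleted nodes, whereas the real method walks only the active list.

#include <cassert>
#include <cstddef>
#include <limits>
#include <utility>
#include <vector>

int main() {
  const std::size_t INVALID = std::numeric_limits<std::size_t>::max();
  // A three-node list 0 <-> 1 <-> 2, stored as parallel next/previous arrays.
  std::vector<std::size_t> next{1, 2, INVALID};
  std::vector<std::size_t> previous{INVALID, 0, 1};
  std::size_t front = 0;
  std::size_t back = 2;

  // Reverse: swap the two link directions of every node, then swap front/back.
  for (std::size_t ii = 0; ii < next.size(); ++ii) {
    std::swap(next[ii], previous[ii]);
  }
  std::swap(front, back);

  assert(front == 2 && next[2] == 1 && next[1] == 0 && next[0] == INVALID);
  assert(back == 0);
  return 0;
}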
+ Index last_element_index = index; + for (size_t nn = 1; nn < number_of_elements; ++nn) { + last_element_index = m_links.at(last_element_index).next; + + // GCOVR_EXCL_START + TKET_ASSERT( + last_element_index < m_links.size() || + AssertMessage() << "erase_interval with start index " << index + << ", number_of_elements=" << number_of_elements + << ", size " << m_links.size() + << ", runs out of elements at N=" << nn + << " (got index " << last_element_index << ")"); + // GCOVR_EXCL_STOP + } + TKET_ASSERT(number_of_elements <= m_size); + m_size -= number_of_elements; + + // Now, splice the soon-to-be-logically-erased interval into the deleted + // elements. Start the new deleted list at the erased interval. + const auto index_of_node_after_interval = m_links[last_element_index].next; + + // Correct whether or not m_deleted_front equals INVALID_INDEX. + m_links[last_element_index].next = m_deleted_front; + // No need to update previous, since the deleted nodes are only a forward + // list. + m_deleted_front = index; + + // Link the node BEFORE the interval to the new next node. + const auto index_of_node_before_interval = m_links[index].previous; + + if (index_of_node_before_interval < m_links.size()) { + // There IS a previous node to be dealt with. + auto& next_node_index_ref = m_links[index_of_node_before_interval].next; + TKET_ASSERT(next_node_index_ref == index); + // This is correct even if index_of_node_after_interval is INVALID_INDEX. + next_node_index_ref = index_of_node_after_interval; + TKET_ASSERT(m_front != index); + } else { + // No previous node, we must have been at the start already. + TKET_ASSERT(index_of_node_before_interval == INVALID_INDEX); + TKET_ASSERT(m_front == index); + m_front = index_of_node_after_interval; + } + // Link the node AFTER the interval to the new previous node. + if (index_of_node_after_interval < m_links.size()) { + // There are more unerased elements after the interval, + // so the first one must be dealt with. + auto& prev_node_index = m_links[index_of_node_after_interval].previous; + TKET_ASSERT(prev_node_index == last_element_index); + // Correct even if there IS no node before the interval. + prev_node_index = index_of_node_before_interval; + TKET_ASSERT(m_back != last_element_index); + } else { + // No node after, we have erased up to the back. + TKET_ASSERT(index_of_node_after_interval == INVALID_INDEX); + TKET_ASSERT(m_back == last_element_index); + m_back = index_of_node_before_interval; + } + if (m_size == 0) { + TKET_ASSERT(m_front == INVALID_INDEX); + TKET_ASSERT(m_back == INVALID_INDEX); + } else { + TKET_ASSERT(m_front < m_links.size()); + TKET_ASSERT(m_back < m_links.size()); + if (m_size == 1) { + TKET_ASSERT(m_front == m_back); + } + } +} + +void VectorListHybridSkeleton::insert_for_empty_list() { + const auto new_index = get_new_index(); + m_front = new_index; + m_back = new_index; + m_links[new_index].next = INVALID_INDEX; + m_links[new_index].previous = INVALID_INDEX; +} + +void VectorListHybridSkeleton::insert_after(Index index) { + const auto new_index = get_new_index(); + const auto old_next = m_links[index].next; + m_links[index].next = new_index; + m_links[new_index].next = old_next; + m_links[new_index].previous = index; + if (old_next == INVALID_INDEX) { + // The old element was already at the back. 
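The O(1) splice described above, which pushes a whole erased interval onto the forward-only deleted list, comes down to two link updates; here is a bare-bones sketch with a next[] array and a free-list head, assuming the chain [first..last] has already been unlinked from the active list.

#include <cstddef>
#include <vector>

// Push an already-unlinked chain [first .. last] (connected via next[])
// onto the front of a singly linked free list, in constant time.
static void splice_onto_free_list(
    std::vector<std::size_t>& next, std::size_t& free_head,
    std::size_t first, std::size_t last) {
  next[last] = free_head;  // free_head may be an "invalid" sentinel; still fine
  free_head = first;
}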
+ m_back = new_index; + } else { + m_links[old_next].previous = new_index; + } +} + +void VectorListHybridSkeleton::insert_before(Index index) { + const auto new_index = get_new_index(); + const auto old_prev = m_links[index].previous; + m_links[index].previous = new_index; + m_links[new_index].next = index; + m_links[new_index].previous = old_prev; + if (old_prev == INVALID_INDEX) { + // The old element was already at the front. + m_front = new_index; + } else { + m_links[old_prev].next = new_index; + } +} + +Index VectorListHybridSkeleton::get_new_index() { + ++m_size; + if (m_deleted_front == INVALID_INDEX) { + // We need to create a new element, it's full. + m_links.emplace_back(); + return m_links.size() - 1; + } + // Reuse a deleted element. + const auto old_deleted_front = m_deleted_front; + m_deleted_front = m_links[old_deleted_front].next; + return old_deleted_front; +} + +std::string VectorListHybridSkeleton::debug_str() const { + std::stringstream ss; + const auto to_str = [](size_t ii) -> std::string { + if (ii == INVALID_INDEX) { + return "NULL"; + } + return std::to_string(ii); + }; + + ss << "VLHS: size " << m_size << ", front " << to_str(m_front) << " back " + << to_str(m_back) << ", del.front " << to_str(m_deleted_front); + + ss << "\nActive links: forward ["; + for (auto index = m_front; index != INVALID_INDEX; + index = m_links[index].next) { + ss << index << "->"; + } + ss << "]\nBackward ("; + for (auto index = m_back; index != INVALID_INDEX; + index = m_links[index].previous) { + ss << index << "->"; + } + ss << ")\nDel.links: {"; + for (auto index = m_deleted_front; index != INVALID_INDEX; + index = m_links[index].next) { + ss << index << "->"; + } + ss << "}"; + return ss.str(); +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp new file mode 100644 index 0000000000..57fbe48834 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/BestFullTsa.hpp @@ -0,0 +1,57 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "HybridTsa.hpp" +#include "SwapListOptimiser.hpp" +#include "SwapListTableOptimiser.hpp" + +namespace tket { + +/** This class combines all the different token swapping components together + * in the best known way to get the best overall end-to-end routine + * (including different heuristics, parameters etc. whose optimal values + * are unknown, and require experimentation). + */ +class BestFullTsa : public tsa_internal::PartialTsaInterface { + public: + BestFullTsa(); + + /** We emphasise that, unlike the general PartialTsaInterface, the solution + * returned is complete, AND includes all known swap list optimisations. + * Warning: unlike most PartialTsaInterface objects, the vertex_mapping + * is NOT updated. (There's no point for a full TSA). + * @param swaps The list of swaps to append to (does not clear first). 
+ * @param vertex_mapping The current desired mapping, giving (current source + * vertex)->(target vertex) mappings. NOT updated at the end. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. (Of course, paths might not be unique if the graph + * is not a tree). + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + tsa_internal::RiverFlowPathFinder& path_finder) override; + + private: + tsa_internal::HybridTsa m_hybrid_tsa; + tsa_internal::SwapListOptimiser m_swap_list_optimiser; + tsa_internal::SwapListTableOptimiser m_table_optimiser; +}; + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp new file mode 100644 index 0000000000..98b37bf9b7 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/CanonicalRelabelling.hpp @@ -0,0 +1,109 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +// PERMUTATION HASH EXPLANATION: +// Certain permutations on the vertices [0,1,2,3,4,5] are represented by an +// unsigned value, the "permutation hash". In fact, ANY permutation on a list of +// ANY 6 distinct objects can be reduced to one of these by a suitable vertex +// relabelling, which is what this class is for. +// +// Note that not every permutation on [0,1,2,3,4,5] corresponds to a permutation +// hash (in fact, very few do); most of them still need relabelling, just as for +// arbitrary labels. +// +// The permutation hashes are done by taking a partition of 6, with parts in +// decreasing order, e.g. 6 = 3+2+1 = 4+2 = 3+3 = 2+2+1+1, etc. We remove all +// the 1 entries, and stick the digits together into a single decimal: +// +// 3+2+1 -> 32, 4+2 -> 42, 2+2+1+1 -> 22 +// +// Each digit represents the length of a slice of the 6 elements 012345: +// +// 32 -> (012)(34)(5), 42 -> (0123)(45), 22 -> (01)(23)(4)(5). +// +// The notation (abcd) represents a cyclic shift on the elements a,b,c,d. +// Thus a -> b -> c -> d -> a. +// +// EVERY permutation on 6 ARBITRARY distinct objects is equivalent to one of +// these, after suitable vertex relabelling. This follows because permutations +// can be decomposed into disjoint cycles. +// + +/** Given a permutation with arbitrary vertex labels, currently size <= 6, we + * want to relabel the vertices so that we can look up an isomorphic mapping in + * a table. This class gives one possible way. 
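The permutation-hash encoding explained in the comment block above (cycle lengths in decreasing order, fixed points dropped, digits concatenated) can be illustrated with a standalone helper; this is a sketch of the encoding as described, not necessarily the exact implementation.

#include <algorithm>
#include <cstddef>
#include <vector>

// E.g. cycle lengths {3, 2, 1} -> 32, {4, 2} -> 42, {2, 2, 1, 1} -> 22,
// and the identity permutation (all cycles of length 1) -> 0.
static unsigned permutation_hash_from_cycle_lengths(
    std::vector<std::size_t> cycle_lengths) {
  std::sort(
      cycle_lengths.begin(), cycle_lengths.end(),
      [](std::size_t a, std::size_t b) { return a > b; });
  unsigned hash = 0;
  for (std::size_t length : cycle_lengths) {
    if (length <= 1) {
      break;  // trailing 1s (fixed points) are dropped
    }
    hash = 10 * hash + static_cast<unsigned>(length);
  }
  return hash;
}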
Still some scope for research and + * improvement here; we want to cut down the number of "isomorphic" copies as + * much as possible (whatever "isomorphic" means in this context) to make the + * lookup table fairly small. + */ +class CanonicalRelabelling { + public: + /** For looking up mappings in the table. */ + struct Result { + /** Will be empty if there are too many vertices. (Current limit is 6, + * although this may be updated in future). */ + VertexMapping old_to_new_vertices; + + /** Element[i], for new vertex i, is the old vertex number which corresponds + * to i. Empty if too many vertices. + */ + std::vector<size_t> new_to_old_vertices; + + /** Set equal to zero if too many vertices. Any permutation on <= 6 vertices + * is assigned a number, to be looked up in the table. 0 is the identity + * permutation. */ + unsigned permutation_hash; + + /** Were there too many vertices in the mapping to look up in the table? */ + bool too_many_vertices; + + /** Was it the identity mapping? If so, no need to relabel OR look up in a + * table. */ + bool identity; + }; + + /** The returned Result object is stored internally. + * @param desired_mapping A (source vertex) -> (target vertex) permutation on + * arbitrary vertex labels. + * @return An object with information such as (1) how to relabel vertices; + * (2) the permutation on NEW vertices, for looking up in a table. + */ + const Result& operator()(const VertexMapping& desired_mapping); + + CanonicalRelabelling(); + + private: + Result m_result; + + VertexMapping m_desired_mapping; + VertexMapping m_work_mapping; + + /** The relabelling/permutation hashing is all based upon decomposing an + * arbitrarily labelled permutation into disjoint cycles, then relabelling the + * vertices within the cycles in a reasonable way. + */ + std::vector<std::vector<size_t>> m_cycles; + + /** The indices in "m_cycles" after sorting appropriately. */ + std::vector<size_t> m_sorted_cycles_indices; +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp new file mode 100644 index 0000000000..de4a187e15 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesCandidateManager.hpp @@ -0,0 +1,189 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "CyclesGrowthManager.hpp" +#include "PartialTsaInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** Concerned with filtering and selecting candidate cycles + * to convert into a swap sequence. Used by CyclesPartialTsa. + * For further explanation, please see the comments for the class + * CyclesPartialTsa. + * + * This is used when all cycles are valid candidates to be converted + * into swap sequences. This class selects the ones to use.
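The two views in CanonicalRelabelling::Result above are straightforward to convert between; here is a small standalone sketch, with plain std types standing in for VertexMapping, that rebuilds the old-to-new map from new_to_old_vertices.

#include <cstddef>
#include <map>
#include <vector>

// Given new_to_old (element [i] is the old vertex corresponding to new
// vertex i), build the old -> new map.
static std::map<std::size_t, std::size_t> old_to_new_from(
    const std::vector<std::size_t>& new_to_old) {
  std::map<std::size_t, std::size_t> old_to_new;
  for (std::size_t new_v = 0; new_v < new_to_old.size(); ++new_v) {
    old_to_new[new_to_old[new_v]] = new_v;
  }
  return old_to_new;
}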
+ * All cycle candidates are assumed to have the same length + * (swaps are just cycles on 2 vertices), but have different "power", + * i.e. different overall contribution to the decrease of L, the sum of + * the distances between the current vertex of each token and its target. + * + * We only want to return solutions which strictly decrease L, so that + * we're guaranteed to make progress (or make no change). + * We must select a subset of disjoint cycles, since if they + * were not disjoint, the returned solution might not decrease L. + * (We based all our calculations on treating the cycles individually, + * so obviously non-disjoint cycles could behave very differently). + */ +class CyclesCandidateManager { + public: + /** These control the behaviour of filtering for candidate selection. + * Experimentation is needed to find the best options. + */ + struct Options { + // In both these options, we have a whole collection of candidate + // swap sequences. + // We can EITHER perform just the best single candidate, + // OR carry out multiple swap sequences simultaneously, + // by selecting a large disjoint subset. + // However, returning multiple sequences, although probably faster + // to compute overall, might give a worse end-to-end solution + // (but this needs testing; of course, it may actually be slower - + // all these are just guesses). + // The reason is that, once the tokens + // have shifted a little bit, it may enable better solutions + // (sequences of higher power) which the algorithm previously + // did not detect. + + /** Setting this to "false" means that only the best single swaps + * will be returned, the others being discarded. (E.g., if some swaps + * move two tokens closer to home, i.e. have "power" two, then + * "power one" swaps - those which only move one token closer to home, + * the other token being empty, or remaining at the same distance from + * its target - will be discarded). + */ + bool return_all_good_single_swaps = false; + + /** The same as "return_all_good_single_swaps", but for cycles + * on >= 3 vertices. Do we return ALL cycle solutions, or only those + * which decrease L by the largest amount? + */ + bool return_lower_power_solutions_for_multiswap_candidates = false; + + /** The "power" of a swap sequence is (total L decrease) / (number of + * swaps). Since a swap can change L by -2,-1,0,1,2 (since up to 2 tokens + * are moved one step), always |power| <= 2. But let's assume that negative + * power candidates are discarded, and rescale to be a percentage. Discard + * all candidates with power percentage smaller than this. Note that only + * fairly dense problems (lots of tokens, or all clustered close together) + * are likely to give higher powers; if all tokens are far apart, or there + * are very few of them, then swapping two nonempty tokens is rare, so + * immediately most candidates would not expect to reach even 50% power. + */ + unsigned min_candidate_power_percentage = 0; + }; + + /** The "CyclesGrowthManager" object stores the candidate cycles internally, + * then we select the set of candidates to use, convert them into swaps, + * and append them to the list of swaps. (All distance data has already + * been calculated and stored within the cycles). + * + * @param growth_manager The object containing the candidate cycles + * @param swaps The list of swaps we will add to, once we convert + * the candidates into swaps.
+ * @param vertex_mapping The current vertex->target vertex mapping, + * which will be updated with the added swaps. + */ + void append_partial_solution( + const CyclesGrowthManager& growth_manager, SwapList& swaps, + VertexMapping& vertex_mapping); + + private: + Options m_options; + + /** Information about the stored candidates, for filtering. */ + struct CycleData { + Cycles::ID id; + + /** The vertices are listed in a vector. + * Store the index, in the vector, of the lowest valued vertex. + * The purpose is to detect duplicate stored cycles (starting from + * a different vertex) and discard all but one of them. + * (Unfortunately necessary because, as cycles are being built up, + * we don't know which final vertices will occur, so we can get many + * duplicate subpaths. Is there a clever data structure to improve this?) + */ + size_t first_vertex_index; + }; + + /** Key: a hash of the vertices in the cycle + * Value: information about the last candidate cycle seen + * with that hash. (Hash collisions are expected to be very rare, and they + * cause no actual problem, so it's probably faster not to use complete + * buckets to resolve hash collisions). + * Used to find duplicate cycles (the same vertices in the same cyclic + * order, but with different start vertex in the vector). + */ + std::map<size_t, CycleData> m_cycle_with_vertex_hash; + + /** We will discard duplicate cycles. For better constness, we don't delete + * cycles, we just store the IDs of those ones we want to use. + */ + std::vector<Cycles::ID> m_cycles_to_keep; + + /** Key: a cycle ID + * Value: how many other cycles it touches (i.e., cycles sharing a vertex + * with it, so not disjoint). + * This will be used to select a large subset of pairwise disjoint + * cycles, with a simple greedy algorithm. + */ + std::map<Cycles::ID, size_t> m_touching_data; + + /** Used by should_add_swaps_for_candidate, to see whether a cycle + * is disjoint from those already selected. + */ + std::set<size_t> m_vertices_used; + + /** Fills m_cycles_to_keep (so, effectively discarding unsuitable cycles), + * returns the common cycle length. + * @param cycles The complete collection of candidate cycles. + * @return The number of vertices in each cycle + * (all cycles should be the same length). + */ + size_t fill_initial_cycle_ids(const Cycles& cycles); + + /** Updates m_cycles_to_keep. Keep only those solutions with the + * highest L-decrease. + * @param cycles The complete collection of candidate cycles, + * but we already have filled m_cycles_to_keep so will + * only consider those cycles. + */ + void discard_lower_power_solutions(const Cycles& cycles); + + /** Sorts m_cycles_to_keep so that those which touch + * the fewest other cycles are listed first. + * @param cycles The complete collection of candidate cycles, + * but we only consider those cycles with IDs in m_cycles_to_keep. + */ + void sort_candidates(const Cycles& cycles); + + /** Checks if the candidate is disjoint from all other candidates + * currently used (stored in m_vertices_used). If so, updates + * m_vertices_used and returns true (but takes no other action). + * Otherwise, do nothing and return false. + * @param cycles The complete collection of candidate cycles. + * @param id The single cycle under consideration. + * @return whether this single cycle should be added to the collection + * of candidates.
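+ * (Illustrative example, with made-up vertex labels: if the cycle [0,1,2]
+ * has already been accepted, then [2,3,4] would be rejected because vertex 2
+ * is already in m_vertices_used, whereas [3,4,5] would be accepted.)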
+ */ + bool should_add_swaps_for_candidate(const Cycles& cycles, Cycles::ID id); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp new file mode 100644 index 0000000000..68549cde8a --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesGrowthManager.hpp @@ -0,0 +1,234 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "TokenSwapping/DistancesInterface.hpp" +#include "TokenSwapping/NeighboursInterface.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** Contains information about a cyclic shift. Note that "moves" + * are not swaps; they are "half swaps". I.e., a move v1->v2 + * means that we pretend that v2 has no token on it, and see + * what would happen if we moved the token on v1 to v2, ignoring + * whatever token is on v2. + * It's important to realise that moves are impossible to do by themselves, + * if both vertices contain tokens; it is only SEQUENCES of moves + * which may sometimes be converted into swaps. + * For example, assuming that edges v0-v1 and v1-v2 exist, + * the length 3 move sequence v0->v1->v2->v0 + * may be enacted by 2 swaps (v0, v1) . (v1, v2). + * Notice that the edge v0-v2 does NOT have to exist. Also, this cyclic shift + * is still possible in 2 swaps if any 2 of the 3 edges v0-v1, v1-v2, v0-v2 + * exist. + */ +struct Cycle { + /** By how much would L (the sum of distances from current vertex + * to target vertex) decrease? Can be positive or negative. + * It has two different interpretations: + * for the first, for OPEN cycles, + * we simply IGNORE the token on v(N), the last vertex, + * and store the decrease for the partial cyclic shift + * v0->v1->v2->v3-> ... -> v(N), AS IF there were no token on v(N). + * + * For the second interpretation, once "attempt_to_close_cycles" + * has returned true, this switches meaning to the L-decrease for + * the FULL cycle, i.e. including the v(N)->v(0) decrease. + */ + int decrease; + + /** The abstract move sequence moves the token on each vertex to the next + * in the list. + * When the cycle is closed, the final vertex's token moves back to the start. + * [v0,v1,v2,v3,...,vN] must be a genuine path (the edges must exist), + * BUT the edge vN -> v0 to close the cycle does NOT have to exist. + */ + std::vector<size_t> vertices; + + /** We need this to maintain paths without duplicate vertices. + * Maintaining a std::set of vertices for quick lookup would work, + * BUT actually "vertices" is always quite small, + * so just do a linear search. + * @param vertex A vertex + * @return whether that vertex already exists in "vertices". + */ + bool contains(size_t vertex) const; +}; + +typedef VectorListHybrid<Cycle> Cycles; + +/** Concerned only with growing cycles and closing them. + * For use in CyclesPartialTsa.
We build up collections of cycles + * with information about what would happen if each were closed, + * i.e. if the complete cycle were performed somehow, + * and also ensure when we grow cycles that the newly added vertex + * is not already present. + * + * Note that longer cycles need more swaps, so our heuristic is to prefer + * shorter cycles, if all else is equal. + * (In the best possible case, if every + * abstract token move v(i)->v(i+1) moved one token closer to home, + * then the total L-decrease would be V, for V vertices, but would need + * V-1 swaps to perform, for a "power" (L-decrease per swap) of V/(V-1) + * which is actually decreasing in V). + * [Of course it's only a heuristic, not necessarily optimal, because + * doing short-term worse moves now might allow better moves + * in the long term - always the problem with optimisation]. + */ +class CyclesGrowthManager { + public: + /** These control the behaviour; experimentation is needed + * to find the best values. + */ + struct Options { + size_t max_cycle_size = 6; + + /** The worst-case total number of cycles grows exponentially, + * e.g. the complete graph with n vertices has ~ 0.5 n^2 edges, + * but >> 2^n cycles. + * + * We avoid exponential time/space blowup by limiting the number + * of cycles under consideration; any more are just discarded. + */ + size_t max_number_of_cycles = 1000; + + /** Discard a partially built up cycle as soon as the L-decrease + * (decrease of total distances of vertices from their targets) + * drops below this. + * + * Larger values should lead to a more "aggressive", "greedy-like" + * algorithm, which MAY be better - more experimentation needed. + * + * This can even be negative, giving cycles the chance to be initially bad, + * but later turn good. + */ + int min_decrease_for_partial_path = 0; + + /** Similar to "min_decrease_for_partial_path", but expressed + * in terms of "power". Power is defined as (L-decrease)/(number of moves), + * which is always between -1 and +1 since each move (NOT a swap!) + * changes L by one of -1,0,+1. + * Express as a percentage to handle fractions. + * The partial cycle will be discarded unless BOTH criteria using + * min_decrease_for_partial_path AND this are satisfied. + */ + int min_power_percentage_for_partial_path = 0; + }; + + /** Access the options, to change if desired. */ + Options& get_options(); + + /** Simply returns the stored cycles. For an extra security check: + * unless you request otherwise (e.g., for debugging purposes), + * you can ONLY extract the cycles once they are + * converted into good candidates by "attempt_to_close_cycles". + * Note that some cycles may be repeated, e.g. [v0, v1, v2] and [v1, v2, v0] + * might both occur; further filtering is necessary. + * + * Of course [v2, v1, v0] would be a totally different cycle, + * as the direction is reversed. + * + * @param throw_if_cycles_are_not_candidates The intended use is to call this + * function only once candidates arise. If you call this function + * without having candidates, then it throws if this is set to true (the + * default value). But for testing/debugging, it is helpful to call this + * function just to inspect the cycles, and so this parameter should be set to + * false. + * @return The stored cycles. + */ + const Cycles& get_cycles( + bool throw_if_cycles_are_not_candidates = true) const; + + /** Start a new problem. The next function to call is + * "attempt_to_close_cycles". Of course, swaps are just cycles with 2 + * vertices.
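+ *
+ * An illustrative (not exact) sketch of the intended calling pattern,
+ * roughly as used by CyclesPartialTsa:
+ *
+ *   if (manager.reset(vertex_mapping, distances, neighbours)) {
+ *     while (!manager.attempt_to_close_cycles(vertex_mapping, distances)) {
+ *       const auto growth =
+ *           manager.attempt_to_grow(vertex_mapping, distances, neighbours);
+ *       if (growth.empty || growth.hit_cycle_length_limit) break;
+ *     }
+ *     // If attempt_to_close_cycles returned true, get_cycles() now returns
+ *     // the closed candidate cycles.
+ *   }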
+ * @param vertex_mapping Where does each vertex want to move? (I.e., it's + * a current vertex -> target vertex map). Can be partial, i.e. not every + * vertex has to have a token. + * @param distances Object to calculate distances between vertices + * @param neighbours Object to calculate vertices adjacent to a given vertex + * @return True if it found at least some good moves, false if it couldn't + * find ANY good moves (which must mean that all tokens are home). + * Recall that a move is only a "half swap". + */ + bool reset( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours); + + /** For each cycle, see what would happen if we performed the full cycle + * (i.e., "closed the cycle"). + * The current cycles are stored as paths [v0, v1, ..., vn], where the edges + * v(i) <-> v(i+1) exist, for 0 <= i < n. + * Even if the edge v(n)->v(0) does not exist, the cycle is POSSIBLE + * by "swapping along" the path [v0, v1, ..., vn]. The end result is a cyclic + * shift. If at least one cycle could be closed to create a viable candidate + * (giving a net decrease in L), return true and delete all cycles which are + * NOT candidates, and also fill in the L-decrease values for the CLOSED + * cycle. If NO cycle closures give a good result, do nothing and return + * false. + * @param vertex_mapping The desired (source vertex->target vertex) mapping, + * for the current locations of tokens on vertices. + * @param distances Object to calculate distances, used to calculate + * L-decreases. + * @return True if at least one good closed cycle exists (i.e., giving net + * strict decrease of L). If so, all non-good cycles are deleted. If no good + * closed cycle exists, do nothing and return false. + */ + bool attempt_to_close_cycles( + const VertexMapping& vertex_mapping, DistancesInterface& distances); + + /** Record what happens when we try to GROW cycles (i.e., increase the length + * of each stored cycle by one, discarding all those which could not grow). + */ + struct GrowthResult { + /** If TRUE, there are no more cycles to consider; finish. */ + bool empty = false; + + /** If we're already at the length limit, delete all cycles. + * (There is no further use for them, so this is safest). + * However, this is not the only possible way for all cycles to be deleted. + * There might not be any other vertices in the graph to add; + * or they might all be bad cycles (i.e., not decreasing L by enough to + * keep them). + */ + bool hit_cycle_length_limit = false; + }; + + /** For each existing cycle, try all possible ways to extend it + * by one step from the last vertex. + * Keep all new cycles generated in this way with a good L decrease, + * and discard all others (including the original cycle). + * Thus, all cycles should have the same number of vertices, increasing + * by one each time this function is called (unless they are all deleted). + * @param vertex_mapping The current desired (source vertex -> target vertex) + * mapping. + * @param distances Object to calculate distances, used to calculate + * L-decreases. + * @param neighbours Object to calculate adjacent vertices to a given vertex. + * @return What happened when we tried to grow the cycles. 
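+ *
+ * (Illustrative example, with made-up vertex labels: the stored path [2,7]
+ * might grow into [2,7,4] and [2,7,9], if vertices 4 and 9 are neighbours
+ * of 7 not already in the path and the grown paths still give a good
+ * L-decrease; the original [2,7] is then discarded.)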
+ */ + GrowthResult attempt_to_grow( + const VertexMapping& vertex_mapping, DistancesInterface& distances, + NeighboursInterface& neighbours); + + private: + Cycles m_cycles; + Options m_options; + bool m_cycles_are_candidates = false; +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp new file mode 100644 index 0000000000..e0ebe7185f --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/CyclesPartialTsa.hpp @@ -0,0 +1,103 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "CyclesCandidateManager.hpp" +#include "PartialTsaInterface.hpp" + +namespace tket { +namespace tsa_internal { + +/** A partial TSA (token swapping algorithm), similar to the cycle-finding + * algorithm as described in the 2016 paper "Approximation and Hardness of + * Token Swapping" by T. Miltzow and others: + * + * https://arxiv.org/abs/1602.05150 + * + * However, our algorithm differs from the paper in several important ways: + * (1) We also accept partial mappings, i.e. there might not be a token on + * every vertex. (2) It is only a partial TSA, not a full TSA (it may give up + * early), so a full end-to-end solution must combine this with another TSA. + * (3) It does not detect long cycles. (4) It never returns "unhappy + * swaps", and is strictly monotonic: L, the sum of distances of each token's + * current vertex to its target, either strictly decreases, or stays the same + * and no swaps are performed. (However, within each cycle, it is possible to + * have bad swaps which don't decrease L much, or even increase L, as long as + * the overall result is a decrease in L). (5) The closing edge of a cycle is + * not required to exist in the graph. + * + * Thus, neither this nor the algorithm in the paper is a generalisation of or + * necessarily better/worse than the other. + * + * One of the ideas in the 2016 paper is to detect good cycles (cyclic shifts) + * v0->v1-> ... ->vn->v0, by searching for cycles in a directed graph. + * It is guaranteed to find cycles if they exist, no matter the length. So, by + * (3), it is better than ours in this sense. However, we don't need the full + * cycle to exist, by (5), since we swap along the path [v0,v1,v2,...,vn]. + * Hence, the paper algorithm is worse than ours in this respect. Regarding (2) + * and (4), the paper is better than ours because it always completes. + */ +class CyclesPartialTsa : public PartialTsaInterface { + public: + CyclesPartialTsa(); + + /** Calculate a solution to improve the current token configuration, + * add the swaps to the list, and carry out the swaps on "vertex_mapping". + * We don't need a path finder because the cycles are built up one vertex + * at a time, so we only need distances and neighbours.
+ * There is no point in calling this multiple times; + * it will continue until EITHER all tokens are home, OR it gives up. + * @param swaps The list of swaps to add to. + * @param vertex_mapping The current state, giving vertex->target mappings. + * Will be updated if any new swaps are performed. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate the neighbours of a vertex. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. (Of course, paths might not be unique if the graph + * is not a tree, so it is an important part of the heuristics that + * the returned paths are fairly "consistent", i.e. "nearby" vertex pairs + * should return "nearby" paths). + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + RiverFlowPathFinder& path_finder) override; + + private: + /** Stores cycles, and controls the growth and discarding of cycles. + * We grow the cycles one vertex at a time until we reach a good cycle + * which is worth turning into swaps. + * If we never find a good cycle then we give up without returning a + * solution. + */ + CyclesGrowthManager m_growth_manager; + + /** Controls the final selection of cycles to perform. Once we've found + * some good cycles, we may not be able to perform all of them + * (because they might not be disjoint, so interfere with each other). + * We may not even want to perform them all, depending upon the options. + */ + CyclesCandidateManager m_candidate_manager; + + /** "append_partial_solution" simply loops, calling this repeatedly until + * it gives up, or all tokens are home. + */ + void single_iteration_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp new file mode 100644 index 0000000000..ae72a71a0a --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistanceFunctions.hpp @@ -0,0 +1,70 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "TokenSwapping/DistancesInterface.hpp" +#include "VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** The sum of the distances of each nonempty token to its home. + * (This is also referred to as "L" in various places, coming from the 2016 + * paper "Approximation and Hardness of Token Swapping"). + * @param vertex_mapping (current vertex where a token lies)->(target vertex) + * mapping. + * @param distances An object to calculate distances between vertices. + * @return the sum, over all tokens, of (current vertex)->(target vertex) + * distances. 
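+ *
+ * (Illustrative example: for vertex_mapping {0->2, 3->3} with d(0,2)=2 and
+ * d(3,3)=0, the returned total is 2.)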
+ */ +size_t get_total_home_distances( + const VertexMapping& vertex_mapping, DistancesInterface& distances); + +/** For just the abstract move v1->v2, ignoring the token on v2, + * by how much does L (the total distances to home) decrease? + * @param vertex_mapping current source->target mapping. + * @param v1 First vertex. + * @param v2 Second vertex. Not required to be adjacent to v1. + * @param distances An object to calculate distances between vertices. + * @return The amount by which L = get_total_home_distances would decrease, + * IF we moved the token on v1 to v2, IGNORING the token currently on v2 + * (which of course is impossible to do in reality if there is a token on + * v2), and leaving all other tokens unchanged. Doesn't have to be positive, of + * course, although positive numbers are good. + */ +int get_move_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances); + +/** The same as get_move_decrease, but for an abstract swap(v1,v2). + * @param vertex_mapping current source->target mapping. + * @param v1 First vertex. + * @param v2 Second vertex. Not required to be adjacent to v1. + * @param distances An object to calculate distances between vertices. + * @return The amount by which L = get_total_home_distances would decrease, + * (which does not have to be a positive number), + * IF the tokens currently on v1,v2 were swapped, and all other tokens + * left unchanged. + */ +int get_swap_decrease( + const VertexMapping& vertex_mapping, size_t v1, size_t v2, + DistancesInterface& distances); + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp new file mode 100644 index 0000000000..44731f3690 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/DistancesInterface.hpp @@ -0,0 +1,66 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +namespace tket { + +/** What is the distance between any two vertices on a graph? + * To save time and cope with larger, sparse graphs, it may + * calculate distances only when required. + */ +class DistancesInterface { + public: + /** Not const because there might be caching, dynamic stuff going on. + * Find the distance between v1,v2. + * @param vertex1 First vertex + * @param vertex2 Second vertex + * @return distance from v1 to v2 within the graph. + */ + virtual size_t operator()(size_t vertex1, size_t vertex2) = 0; + + /** If you KNOW a path from v1 to v2 which is shortest, then + * extra information about distances can be deduced from subpaths + * (each subpath must also be a shortest path: otherwise, the whole path + * would not be of minimum length). + * Does nothing unless overridden. + * @param path A sequence [v0,v1, v2, ..., vn] of vertices, KNOWN to be a + * shortest path from v0 to vn. 
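+ * (For example, if [a,b,c,d] is known to be a shortest path, then
+ * d(a,c)=2, d(b,d)=2 and d(a,d)=3 can all be deduced.)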
The caller must not call this without being + * SURE that it really is a shortest path, or incorrect results may occur. + */ + virtual void register_shortest_path(const std::vector<size_t>& path); + + /** If you know the neighbours of a vertex, you can tell this class + * and it MIGHT choose to cache the distances. + * Simply calls register_edge(vertex, neighbour) repeatedly, unless overridden. + * @param vertex A vertex. + * @param neighbours A list of vertices adjacent to the given vertex. + */ + virtual void register_neighbours( + size_t vertex, const std::vector<size_t>& neighbours); + + /** Does nothing unless overridden. Stores the fact that v1,v2 are adjacent, + * to save later recalculation. + * @param vertex1 First vertex + * @param vertex2 Second vertex + */ + virtual void register_edge(size_t vertex1, size_t vertex2); + + virtual ~DistancesInterface(); +}; + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp new file mode 100644 index 0000000000..c4be18d7c3 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/DynamicTokenTracker.hpp @@ -0,0 +1,94 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** Tracks which token is on which vertex; + * every vertex has a different token. + * Only intended for a specific optimisation pass in SwapListOptimiser. + * Does not require contiguous vertex numbers or token numbers. + * Does not need to be initialised with all vertices at the start. + * Thus, operations take time O(log N), with N being the current number + * of vertices seen, NOT the total number of vertices. + * The tokens are "artificial", i.e. nothing to do with an actual + * Token Swapping problem; they are there to track full vertex mappings + * induced by a sequence of swaps. + */ +class DynamicTokenTracker { + public: + /** Call before starting a new sequence of swaps. */ + void clear(); + + /** Logically the same effect as clear, but doesn't actually clear. + * Instead, fills existing map entries. + * Should be a bit faster for many reuses than clearing every time, + * because it will need fewer tree rebalances inside the maps. + */ + void reset(); + + /** Swap the tokens at the given vertices, + * and return the TOKENS that were swapped. + * Note that every vertex is assumed initially to have a token + * with the same vertex value (i.e., the token equals the INITIAL + * vertex). Thus we don't need to know in advance which vertices + * exist, they will be lazily stored only when needed. + * @param swap The two vertices to be swapped. + * @return The two TOKENS on those vertices which were swapped. + */ + Swap do_vertex_swap(const Swap& swap); + + /** Checks if the swap sequence performed on the other tracker object + * results in the same vertex permutation.
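+ * (For example, a tracker which performed swap(3,4) twice is equivalent to
+ * a tracker which performed no swaps at all, even though vertices 3,4 are
+ * mentioned only in the first tracker.)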
+ * This is NOT the same as just checking equality of data, + * because a vertex could be unmentioned in our sequence, + * and thus not appear anywhere internally; but in the other sequence + * it could appear, but end up back where it started. + * @param other Another DynamicTokenTracker object + * @return Whether the swaps performed on this object and the other object + * resulted in the same vertex permutation on the whole graph + * (remembering that some vertices may be mentioned in one object + * but not the other). + */ + bool equal_vertex_permutation_from_swaps( + const DynamicTokenTracker& other) const; + + private: + VertexMapping m_vertex_to_token; + + /** Get the token, but if it doesn't already exist, create it. + * @param vertex The vertex + * @return The token at that vertex, or equal to the vertex number + * IF it doesn't already exist. + */ + size_t get_token_at_vertex(size_t vertex); + + /** Does every token mentioned in this object lie at the same vertex in + * the other object? + * @param other Another DynamicTokenTracker object + * @return Whether all tokens mentioned by this object have + * the same location according to the other object (remembering + * that unmentioned vertices are implicitly assumed to have equal tokens + * lying on them initially). + */ + bool tokens_here_have_equal_locations_in_the_other_object( + const DynamicTokenTracker& other) const; +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp b/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp new file mode 100644 index 0000000000..ede9d94fca --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/ExactMappingLookup.hpp @@ -0,0 +1,80 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "CanonicalRelabelling.hpp" + +namespace tket { +namespace tsa_internal { + +/** Given a raw vertex->vertex mapping which must be enacted exactly (no empty + * tokens), attempt to find an optimal or near-optimal result in a table, and + * handle all vertex back-and-forth relabelling. + */ +class ExactMappingLookup { + public: + /** If successful, "swaps" will contain a vector of swaps which performs the + * desired mapping. */ + struct Result { + std::vector<Swap> swaps; + bool success; + bool too_many_vertices; + }; + + /** The Result object is stored internally. Tries to find a sequence of swaps + * in the table. + * @param desired_mapping A (source vertex) -> (target vertex) permutation. + * @param edges Edges which exist between the vertices (equivalently, the + * swaps which we are permitted to use). Edges with vertices not appearing in + * desired_mapping will simply be ignored. + * @param max_number_of_swaps Stop looking in the table if every possible + * sequence of swaps in the table which enacts the desired mapping exceeds + * this length (or doesn't exist at all).
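+ *
+ * (Trivial illustrative example: for desired_mapping {0->1, 1->0} with
+ * edges {(0,1)}, the result would be expected to contain the single
+ * swap (0,1).)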
+ */ + const Result& operator()( + const VertexMapping& desired_mapping, const std::vector<Swap>& edges, + unsigned max_number_of_swaps = 16); + + /** Used for partial mapping lookups; like operator(), but does NOT erase the + * previous result. Overwrites with a new result if an improvement is found. + * @param desired_mapping A (source vertex) -> (target vertex) permutation. + * @param edges Edges which exist between the vertices. + * @param max_number_of_swaps Stop looking in the table once the swap + * sequences exceed this length. + */ + const Result& improve_upon_existing_result( + const VertexMapping& desired_mapping, const std::vector<Swap>& edges, + unsigned max_number_of_swaps = 16); + + private: + Result m_result; + CanonicalRelabelling m_relabeller; + + /** Attempts to fill m_result, given the relabelling to use. + * If m_result already has a valid solution (i.e., "success" == true), + * only fills if the new solution has strictly fewer swaps. + * @param relabelling_result The result of relabelling, for lookup in the raw + * table. + * @param old_edges Edges which exist between the vertices before relabelling. + * @param max_number_of_swaps Stop looking once the swap sequences exceed this + * length. + */ + void fill_result_from_table( + const CanonicalRelabelling::Result& relabelling_result, + const std::vector<Swap>& old_edges, unsigned max_number_of_swaps); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp b/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp new file mode 100644 index 0000000000..7a4ee5bccf --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/FilteredSwapSequences.hpp @@ -0,0 +1,161 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <map> +#include <vector> + +#include "SwapConversion.hpp" + +namespace tket { +namespace tsa_internal { + +/** Takes a raw list of integers, where each integer represents a swap sequence + * on the vertices {0,1,2,...,5} giving the same vertex permutation. + * + * NOTE: the magic number 5 (or 6) arises because we originally constructed + * the table by exhaustively constructing swap sequences on graphs with up to + * 6 vertices, up to a certain length. [Results were also merged together, + * e.g. the cycle C_6, or with a few extra edges added, can be searched + * in reasonable time to a longer length than K_6]. + * This was chosen because the complete graph K_6 has 15 edges, + * so conveniently each edge (or swap) can be represented by a number 1-15, + * and thus by a single hexadecimal digit. + * Thus, 4 bits are needed for each swap, so a 64-bit integer can represent + * swap sequences of length <= 16 (with 0 denoting the end of sequence). + * [Although, the table currently has entries only of length <= 12].
+ * [Actually, it is not hard to prove - by considering "token tracking" - + * that optimal swap sequences on <= N vertices have + * length <= N(N-1)/2, the same as the number of edges of K_N. Thus length + * <= 15 already suffices to represent all possible optimal sequences + * on <= 6 vertices]. + * If we used 5 bits, we'd be able to represent sequences of length <= 12 + * (because 5*12 = 60 < 64) on graphs with <= 8 vertices (since + * 8*7/2 = 28 < 31). + * If we expand the table in future, we will probably design a whole new + * format, so we don't attempt to make it more generic at this stage. + * + * Given such data, FilteredSwapSequences knows how to index and store it + * somehow (exactly how is an implementation detail - it can be thought of + * as a "database of swap sequences"), + * so that results can be looked up again, when given the edges bitset + * (i.e., edges existing in the graph, i.e. vertex swaps we are allowed to + * perform). This is for data close to the raw table data; it knows nothing + * about vertex relabelling, which of course is a crucial component. + * + * The main precomputed table of data is also accessed here, via the + * SingleSequenceData constructor. + * + * Note that the raw table contains several lists of integers, + * each one denoting different swap sequences enacting a single permutation, but + * with different edges; whereas this class only stores a single list in + * searchable form. + */ +class FilteredSwapSequences { + public: + /** A result which comes from the "raw" table data in SwapSequenceTable, with + * minimal processing. */ + struct SingleSequenceData { + /** The edges (i.e., swaps) actually used (or 0 if none are used). [This + * could be computed from swaps_code but there is no need to recompute each + * time]. */ + SwapConversion::EdgesBitset edges_bitset; + + /** An integer encoding a sequence of swaps. 0 means no swaps. */ + SwapConversion::SwapHash swaps_code; + + /** The number of swaps used. Set to max() if no valid sequence was found + * (e.g., if not present in the table). */ + unsigned number_of_swaps; + + /** Initialised with "null" values automatically, i.e. number_of_swaps + * taking value max(). */ + SingleSequenceData(); + + /** This is how we access the fixed data in the large const static global + * table. This constructor looks up the shortest sequence of swaps enacting + * the given permutation, and fills the entries. + * @param permutation_hash The hash of the desired permutation of + * {0,1,2,...,5}, as used to look up results in the table (after + * relabelling). See CanonicalRelabelling for explanation. + * @param edges_bitset The collection of edges on {0,1,2,...,5} which + * actually exist in the graph (i.e., the swaps which are allowed). + * @param max_number_of_swaps Do not return any solutions with more swaps + * than this: useful speedup to allow early termination. + */ + SingleSequenceData( + unsigned permutation_hash, SwapConversion::EdgesBitset edges_bitset, + unsigned max_number_of_swaps); + }; + + /** Index and process the raw data to allow later retrieval. Can only be done + * once (a security measure to avoid accidentally reconstructing large tables + * multiple times). The codes don't need to be sorted OR deduplicated. + * Duplicate, redundant and suboptimal data IS tolerated, as long as it is + * correct. 
Such data could lead to slowdowns from a larger table, BUT will + * not affect the actual results (i.e., if the data contains some entries + * inferior to others, then the inferior results will automatically never be + * returned, because the superior ones will always be found). + * @param codes The raw list of integers stored in the original table + */ + void initialise(std::vector<SwapConversion::SwapHash> codes); + + /** Search for the entry with fewest swaps whose edges_bitset is a + * subset of the given edges_bitset (so that it only uses allowed swaps). + * If there is no suitable sequence in the table, returns a null object. + * Stop searching early if it finds that all entries have too many swaps. + * @param allowed_swaps The swaps which can occur (in other words, the + * existing edges in the graph). + * @param max_num_swaps Don't return any entries with more than this many + * swaps. + * @return An entry with the fewest swaps, or a null entry if none exists. + */ + SingleSequenceData get_lookup_result( + SwapConversion::EdgesBitset allowed_swaps, unsigned max_num_swaps) const; + + /** For testing, just count how many entries we've stored. + * @return The total number of encoded swap sequences stored internally. + */ + size_t get_total_number_of_entries() const; + + private: + /** We recalculate the number of swaps each time, rather than storing. + * We just sort by swaps_code, since this respects numbers of swaps. + * I.e., if S1, S2 are swap sequences, and encoding(S(j)) is an integer, then + * length(S1) < length(S2) => encoding(S1) < encoding(S2). + * Thus, minimising encoding(S) will also force minimising length(S). + */ + struct TrimmedSingleSequenceData { + SwapConversion::EdgesBitset edges_bitset; + SwapConversion::SwapHash swaps_code; + }; + + /** Key: a subset of bits in edges_bitset. + * Value: codes containing those bits in their edges bitset, sorted in + * increasing order. No entry occurs multiple times, but the values are spread + * out amongst the keys to balance the data better and give faster lookup. + */ + std::map<SwapConversion::EdgesBitset, std::vector<TrimmedSingleSequenceData>> + m_internal_data; + + /** Must be pushed back in increasing order of swaps_code. Processes and + * stores the result for later searchability. + * @param datum Information about a single raw entry from the table. + */ + void push_back(TrimmedSingleSequenceData datum); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp new file mode 100644 index 0000000000..6d6ade36bc --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/GeneralFunctions.hpp @@ -0,0 +1,85 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +// This is for "leftover" functions not specifically linked to token swapping +// which are candidates for being used and moved elsewhere, +// e.g. the main src/Utils folder.
+ +#include <map> +#include <optional> + +#include "Utils/Assert.hpp" + +namespace tket { +namespace tsa_internal { + +/** Returns the value in a map corresponding to a key, IF it exists, + * or an empty optional object if it does not. + * @param map The std::map object. + * @param key The key. + * @return The value if it exists, or an empty optional value if it doesn't. + */ +template <class K, class V> +std::optional<V> get_optional_value(const std::map<K, V>& map, const K& key) { + const auto citer = map.find(key); + if (citer == map.cend()) { + return {}; + } + return citer->second; +} + +/** The key->value mapping is required to be bijective (reversible). + * @param map The std::map object. + * @return Another std::map, with the key->value mappings reversed. + */ +template <class K, class V> +std::map<V, K> get_reversed_map(const std::map<K, V>& map) { + std::map<V, K> reversed_map; + for (const auto& entry : map) { + reversed_map[entry.second] = entry.first; + } + TKET_ASSERT(map.size() == reversed_map.size()); + return reversed_map; +} + +/** Finds the rightmost "one" (least significant bit) + * occurring in the binary expansion of x, an unsigned integer type. + * Returns the bit, whilst also removing it from x. + * @param x The original unsigned integer type, which will have one bit removed + * (or remain at zero if already at zero). + * @return The bit which was removed from x (or 0 if none was removed). + */ +template <class UINT> +static UINT get_rightmost_bit(UINT& x) { + // Standard bit hack: decrementing 10000... gives 01111... + // E.g., consider: + // x = 001101011010000 + // x-1 = 001101011001111 + // ~(x-1) = 110010100110000 + // Therefore, AND x with ~(x-1). + + // No "if" statements; unsigned int wraparound is allowed. + UINT y = x; + --y; + y = ~y; + const UINT bit = (x & y); + x ^= bit; + return bit; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa.hpp b/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa.hpp new file mode 100644 index 0000000000..f64ecd1335 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/HybridTsa.hpp @@ -0,0 +1,52 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "CyclesPartialTsa.hpp" +#include "TrivialTSA.hpp" + +namespace tket { +namespace tsa_internal { + +/** A full end-to-end TSA, combining the partial cycles TSA + * (hopefully good) with the full "trivial" TSA (not so good). + */ +class HybridTsa : public PartialTsaInterface { + public: + HybridTsa(); + + /** For the current token configuration, calculate a sequence of swaps + * to move all tokens home, and append them to the given list. + * As this is a full TSA, it guarantees to find a solution. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex.
+ * @param path_finder An object to calculate a shortest path between any + * pair of vertices. + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + RiverFlowPathFinder& path_finder) override; + + private: + CyclesPartialTsa m_cycles_tsa; + TrivialTSA m_trivial_tsa; +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp new file mode 100644 index 0000000000..66bfb96afe --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/NeighboursInterface.hpp @@ -0,0 +1,44 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <cstddef> +#include <vector> + +namespace tket { + +/** What are the adjacent vertices to a given vertex on a graph? + * For larger, sparse graphs, it might + * calculate and store neighbours only when required. + */ +class NeighboursInterface { + public: + /** Returns the neighbours of the given vertex. + * The vector of neighbours is required to be stored internally. + * However, there is no guarantee that the reference will remain valid + * once another function call occurs. + * By default, throws (not implemented). + * (It's useful to be able to create a "null" object like this, + * because some algorithms don't actually need a neighbours object, + * but others do). + * @param vertex A vertex. + * @return A sorted list of all adjacent vertices, stored internally. + */ + virtual const std::vector<size_t>& operator()(size_t vertex); + + virtual ~NeighboursInterface(); +}; + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp new file mode 100644 index 0000000000..ce2c8b5911 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/PartialMappingLookup.hpp @@ -0,0 +1,76 @@ + +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <set> +#include <vector> + +#include "ExactMappingLookup.hpp" + +namespace tket { +namespace tsa_internal { + +/** This is the same as ExactMappingLookup, except that we allow vertices not to + * have tokens.
It works simply by going through possible permutations of empty + * vertices and doing an exact permutation lookup (limiting the number of + * permutations to avoid excessive slowdown). + */ +class PartialMappingLookup { + public: + /** Parameters controlling the partial mapping lookup. Sensible defaults, + * found by experimentation. */ + struct Parameters { + /** To speed up, don't try all permutations if there are many empty + * vertices; limit them to this number. */ + unsigned max_number_of_empty_vertex_permutations; + + Parameters(); + }; + + /** If desired, change some internal parameters. + * @return Internal parameters object, to be changed if desired. + */ + Parameters& get_parameters(); + + /** The result is stored internally. The same format as ExactMappingLookup. + * @param desired_mapping A (source vertex) -> (target vertex) permutation. + * @param edges Edges which exist between the vertices (equivalently, the + * swaps which we are permitted to use). Edges with vertices not appearing in + * desired_mapping will simply be ignored. + * @param vertices_with_tokens_at_start Every vertex mentioned within + * desired_mapping which has a token, just BEFORE the swaps are performed to + * enact the desired_mapping, must be mentioned here. Other vertices not + * mentioned in the mapping are allowed; they will simply be ignored. + * @param max_number_of_swaps Stop looking if every sequence of swaps in the + * table which enacts the desired mapping exceeds this length (or doesn't + * exist at all). + */ + const ExactMappingLookup::Result& operator()( + const VertexMapping& desired_mapping, const std::vector<Swap>& edges, + const std::set<size_t>& vertices_with_tokens_at_start, + unsigned max_number_of_swaps = 16); + + private: + Parameters m_parameters; + ExactMappingLookup m_exact_mapping_lookup; + std::vector<size_t> m_empty_source_vertices; + std::vector<size_t> m_empty_target_vertices; + VertexMapping m_altered_mapping; +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp new file mode 100644 index 0000000000..bdf935a6b1 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/PartialTsaInterface.hpp @@ -0,0 +1,68 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "DistancesInterface.hpp" +#include "NeighboursInterface.hpp" +#include "RiverFlowPathFinder.hpp" +#include "VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** TSA stands for Token Swapping Algorithm. + * A "partial TSA" is allowed to give up (not calculate any swaps), + * even when the tokens are not all home. + * The hope is that different partial TSAs can be combined to give + * a good full TSA (i.e., one which always finds a complete solution).
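+ * (For example, HybridTsa combines CyclesPartialTsa with TrivialTSA in
+ * this way to make a complete TSA.)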
+ */ +class PartialTsaInterface { + public: + /** The algorithm is allowed to fail (not calculate any swaps), + * but when it DOES return swaps, it is required to decrease L + * (the sum of the distances of each vertex containing a token + * from its target vertex). + * Thus progress is always nonnegative. + * Of course, a complete TSA is a special case. + * @param swaps The list of swaps to append to (does not clear first). + * @param vertex_mapping The current desired mapping. Each key is the + * current vertex where a token is; its value is the target vertex + * the token wants to reach. Usually, will be updated upon return to be the + * new configuration after performing the swaps. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. (Of course, paths might not be unique if the graph + * is not a tree, so it is an important part of the heuristics that + * the returned paths are fairly "consistent", i.e. "nearby" vertex pairs + * should return "nearby" paths). + */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& neighbours, + RiverFlowPathFinder& path_finder) = 0; + + /** For debugging purposes, every TSA object has a name. + * @return The name of this object (not necessarily unique). + */ + const std::string& name() const; + + protected: + std::string m_name; +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp new file mode 100644 index 0000000000..6ab237726e --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/RiverFlowPathFinder.hpp @@ -0,0 +1,98 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "DistancesInterface.hpp" +#include "NeighboursInterface.hpp" +#include "Utils/RNG.hpp" + +namespace tket { +namespace tsa_internal { + +/** Given two vertices in a graph, find a shortest path between them; + * of course paths might not be unique. + * The aim is to make paths overlap; + * if we move tokens along paths with many edges in common, it is more likely + * that some basic swap optimisation will reduce the number of swaps. + * (Disjoint swaps are the worst kind to optimise, of course; + * no reduction is possible). + * + * We think of flowing water: if water has already flowed through, + * it creates channels along which it is more likely to flow next time. + * We do a similar thing: by remembering which edges have already been used, + * whenever we have a choice of edge to continue a path, choose one which + * has already been used frequently. 
+ * + * Repeated calls to operator()(v1,v2) + * are likely to return the same path, but may change slightly over time. + */ +class RiverFlowPathFinder { + public: + /** All the objects should remain valid throughout + * the lifetime of this object. + * @param distances An object to calculate distances between vertices. + * @param neighbours An object to calculate adjacent vertices to any given + * vertex. + * @param rng A source of (pseudo) randomness. + */ + RiverFlowPathFinder( + DistancesInterface& distances, NeighboursInterface& neighbours, RNG& rng); + + /** For reuse in different problems (but still the same architecture; + * the same "distances" and "neighbours" objects are used), + * but constructing paths anew + * (which is appropriate because completely different problems will + * probably need different paths). This also resets the RNG with its + * default seed, for better reproducibility. + * + * (This may be suitable for simulated annealing-type algorithms + * which involve solving with many different token positions, i.e. + * partially finished problems, even though the end-to-end problem + * is the same). + */ + void reset(); + + /** Get the path from v1 to v2. May change over time, and + * path(v1, v2) is NOT necessarily the reverse of path(v2, v1). + * @param vertex1 First vertex v1. + * @param vertex2 Second vertex v2. + * @return A list of vertices, starting with v1 and ending with v2, + * giving a shortest path from v1 to v2. + */ + const std::vector& operator()(size_t vertex1, size_t vertex2); + + ~RiverFlowPathFinder(); + + /** Whenever an edge is used, i.e. we swap tokens along it, tell this + * object; the proper functioning of this class depends on + * knowing which edges have been used in the solution so far. + * @param vertex1 First vertex v1 of an edge v1-v2 that was used in the + * solution. + * @param vertex2 Second vertex v2 of the edge. + */ + void register_edge(size_t vertex1, size_t vertex2); + + private: + struct Impl; + /** Pimpl idiom. */ + std::unique_ptr m_pimpl; +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp new file mode 100644 index 0000000000..40cdde8425 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapConversion.hpp @@ -0,0 +1,101 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "TokenSwapping/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/* +NOTE on ENCODING: with 6 vertices, there are 15 possible edges or swaps. +Thus, we can encode a single swap by a number in the range 0-15 (using 0 to +denote "no swap"). + +This fits into 4 bits exactly. + +Thus, a single 64-bit unsigned int can store any swap sequence of length <= 16. 
+We also have the added benefit that ints written in hexadecimal are easier for a +human to read, since each hex digit 0-9 or A-F corresponds to a single swap. + +An obvious optimisation is that adjacent swaps should be different; +and also, blocks of four zeros cannot occur within the encoding. +However, this would still only reduce the total number to about 30%, +so we'd still need 62 or 63 bits to represent all sequences of length <= 16. +So it's not worth trying fancy encodings to store more possible sequences in +fewer bits, without a good theoretical breakthrough to come up with a really +good way to encode and search through only optimal or "near optimal" sequences. + +If we desire in future to increase the number of vertices, we'd have to use at +least 5 bits per swap, so could only fit sequences of length <= 12 in a 64-bit +int. Of course, (8*7)/2 = 28 < 31, so we could store swaps on <= 8 vertices +instead of 6. + +*/ + +// Generally no checks on the input values, it's assumed that the caller +// knows how the table encoding works. +// The possible swaps (01), (02), (03), ..., (45) on vertices {0,1,2,3,4,5} +// are listed in a global vector, so with values 0,1,...,14. +// Adding 1 to the index gives possible values 1,2,...,15 for the swaps, +// and 0 means no swap. Thus a sequence of swaps is encoded by storing the bits +// in a uint, with first swap at the least significant bits, and so on with +// leftward shifts by 4 bits each time. + +struct SwapConversion { + /** Encodes a sequence of <=16 swaps, each swap being one of + * the 15 possible swaps on vertices {0,1,2,3,4,5}, and hence encoded by 4 + * bits. Zero represents the empty sequence. */ + typedef std::uint64_t SwapHash; + + /** Encodes a set of swaps, each one taken from the 15 possibilities. With + * each swap given a numerical value from 1 to 15, we simply shift 1u by that + * amount (minus one), and OR them together. Thus, when looking up in a table, + * we only allow swap sequences whose edge bitset is a SUBSET of a given edges + * bitset (corresponding to the edges in the graph, i.e. allowed swaps). + */ + typedef std::uint_fast16_t EdgesBitset; + + /** Given a valid number x, return the actual swap on vertices {0,1,2,3,4,5} + * which it represents. + * @param x A code number representing a single swap. + * @return A single swap on vertices {0,1,2,3,4,5}. + */ + static const Swap& get_swap_from_hash(SwapHash x); + + /** The opposite of get_swap_from_hash. + * @param swap A swap on {0,1,2,3,4,5}. (Must be in standard order, i.e. (i,j) + * with 0 <= i < j <= 5). + * @return A number 1-15 which encodes that swap in the table. + */ + static SwapHash get_hash_from_swap(const Swap& swap); + + /** Converting swaps to bitsets, which swaps are used in the code? + * @param swaps_code An integer representing a sequence of swaps. + * @return The set of swaps used in the sequence, encoded as a binary number. + */ + static EdgesBitset get_edges_bitset(SwapHash swaps_code); + + /** The number of swaps in a sequence. + * @param swaps_code An integer representing a sequence of swaps. + * @return The length of the swap sequence. 
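+ *
+ * Illustrative worked example (assuming the lexicographic listing of swaps
+ * described above, so that (01) -> 1, (02) -> 2, ..., (12) -> 6, ...,
+ * (45) -> 15): the sequence [(01), (12), (01)] is encoded as 0x161, with
+ * the first swap in the least significant 4 bits. Then
+ * get_number_of_swaps(0x161) == 3, and
+ * get_edges_bitset(0x161) == ((1u << 0) | (1u << 5)), i.e. exactly the
+ * swaps (01) and (12) occur somewhere in the sequence.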
+ */ + static unsigned get_number_of_swaps(SwapHash swaps_code); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp new file mode 100644 index 0000000000..f64db27266 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapFunctions.hpp @@ -0,0 +1,46 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "TokenSwapping/VectorListHybrid.hpp" + +namespace tket { + +typedef std::pair Swap; +typedef VectorListHybrid SwapList; +typedef SwapList::ID SwapID; + +/** No distinction between (v1, v2) and (v2, v1). + * Will ensure that v1token mapping. + */ + void full_optimise(SwapList& list, const VertexMapping& vertex_mapping); + + // Most optimisation passes below are O(N^2.log N) in the worst case, + // but in practice will hopefully be a lot faster. + // It's hard to compare passes; + // for any two passes A, B there are probably examples where + // pass A is better than B, but others where B is better than A. + // Also, passes are not commutative; reordering the passes + // can give different results! Experimentation needed. + + /** Do not move any swaps, unless it cancels with a previous copy of itself + * (in which case, delete both). The fastest pass. + * @param list The swaps to be optimised. + */ + void optimise_pass_with_zero_travel(SwapList& list); + + /** Starting from the front and working along, every swap is moved + * as far towards the front as possible, until it hits a non-disjoint + * swap (so that it cannot pass through it; it doesn't commute), + * or an identical copy of itself (so that they cancel each other). + * The overall reduction should be the same as + * optimise_pass_with_zero_travel, which is cheaper + * (because swaps do not move), but interacting swaps should + * cluster together, which may be useful for certain algorithms. + * @param list The swaps to be optimised. + */ + void optimise_pass_with_frontward_travel(SwapList& list); + + /** Erase two swaps if they do the same TOKEN swap (which means that + * they can be removed). Knows nothing about the problem-specific tokens, + * instead this creates artificial tokens. + * This is slower than optimise_pass_with_zero_travel and + * optimise_pass_with_frontward_travel, but is strictly more powerful + * (any reduction by those passes will also occur with this pass, + * but some additional reductions are possible with this pass. E.g., + * this pass reduces (01)(12)(01)(12)(01)(12), the cube of a 3-cycle, + * to zero swaps, which the other passes cannot, since (01) and (12) + * are not disjoint and hence cannot pass through each other). + * However, NOTE that this pass can introduce EMPTY swaps, w.r.t. + * the problem-specific tokens, so further passes to remove + * problem-specific empty token swaps are necessary + * to get the full reduction. 
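+ *
+ * Illustrative, non-normative sketch of the follow-up this implies, where
+ * "optimiser" is a SwapListOptimiser and "vertex_mapping" is the problem's
+ * desired source->target mapping:
+ *
+ *   optimiser.optimise_pass_with_token_tracking(list);
+ *   // The pass above may have introduced swaps which are empty w.r.t. the
+ *   // real tokens, so remove them with the problem-specific pass:
+ *   optimiser.optimise_pass_remove_empty_swaps(list, vertex_mapping);
+ *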
+ * @param list The swaps to be optimised. + */ + void optimise_pass_with_token_tracking(SwapList& list); + + /** O(N log N): simply discard any swap between two empty tokens. + * (Recall that optimise_pass_with_token_tracking does NOT know + * about these specific tokens, it creates internal artificial ones + * just for the pass. That pass can be much slower than this pass, + * but also can make some reductions which this pass cannot). + * @param list The swaps to be optimised. + * @param vertex_mapping The desired source->target mapping (so that + * we can determine which vertices have tokens on them). + */ + void optimise_pass_remove_empty_swaps( + SwapList& list, VertexMapping vertex_mapping); + + private: + std::map m_data; + + DynamicTokenTracker m_token_tracker; + + /** What would happen if you tried to move the swap towards the front? + * Doesn't actually move the swap, just returns the ID of the first + * blocking swap it hits (or null if there is none and it could move + * all the way to the front), UNLESS it actually hits another copy of itself, + * in which case it DOES erase (and the caller can tell by checking the + * size). + * @param list The swaps to be optimised. + * @param id The ID of the current swap which we might move frontwards. + * @return The ID of the previous reachable distinct swap which is + * non-disjoint (has a vertex in common, so doesn't commute), + * or empty if none exists. + */ + static std::optional get_id_of_previous_blocker( + SwapList& list, SwapID id); + + /** Actually move the swap as far towards the front as possible until + * blocked, erasing it if it cancelled with another copy of itself. + * @param list The swaps to be optimised. + * @param id The ID of the current swap to move frontwards. + * @return true if the swap cancelled with a copy of itself. + */ + static bool move_swap_towards_front(SwapList& list, SwapID id); + + /** The same as optimise_pass_with_token_tracking, + * but without calling "clear" OR "reset" on m_token_tracker first). + * @param list The swaps to be optimised. + */ + void optimise_pass_with_token_tracking_without_clearing_tracker( + SwapList& list); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp new file mode 100644 index 0000000000..87b5c72d31 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListSegmentOptimiser.hpp @@ -0,0 +1,97 @@ + +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include +#include + +#include "PartialMappingLookup.hpp" +#include "TokenSwapping/SwapFunctions.hpp" +#include "VertexMapResizing.hpp" + +namespace tket { +namespace tsa_internal { + +/** Given a swap list and a start point in the list, uses the lookup table + * to reduce an interval of swaps, replacing them in-place by a shorter sequence + * with the same end-to-end vertex mapping (although source->target mappings may + * change for empty source vertices, i.e. those without a token at the + * beginning). + */ +class SwapListSegmentOptimiser { + public: + struct Output { + /** The length of the segment that was replaced. + * Of course, this will be zero if no optimisation takes place. + */ + size_t initial_segment_size; + + /** The length of the segment after replacement. Always <= + * initial_segment_size. */ + size_t final_segment_size; + + /** If we did replace a segment with a shorter one, give the ID of the last + * swap of the segment. It might be null because the new segment might be + * empty. + */ + std::optional new_segment_last_id; + }; + + /** Starting at the given ID, which must be valid, move forward to examine an + * interval of swaps, and try to replace it with a shorter sequence looked up + * in the table. It MAY replace a segment with a different one of equal + * length; optimisation has probably already taken place, and couldn't break + * it up any further. If the table suggests a different but still valid + * interval, it MAY afford further opportunities for optimisation even if it's + * of the same length, so we might as well splice in the new segment. + * @param initial_id The ID within the swap list of the first swap which may + * be replaced, where we begin optimisation. + * @param vertices_with_tokens_at_start Just before the swap at initial_id is + * performed, which vertices have tokens on them? Extra unused vertices are + * allowed (but are helpful, since they may be added into the new sequence to + * reduce length). + * @param map_resizing An object to add/remove vertices from the mapping, with + * knowledge of edges in the graph (not just those involved in the swap list). + * @param swap_list The sequence of swaps to be reduced, in-place. + * @return An object stored internally, with information about the segment + * replacement/reduction (if any). + */ + const Output& optimise_segment( + SwapID initial_id, const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list); + + private: + Output m_output; + PartialMappingLookup m_mapping_lookup; + + // Naively, a greedy-type way to optimise is to + // reduce the SHORTEST sequence possible, by the LARGEST amount. + // This may not always be optimal, but should be OK. + std::vector m_best_optimised_swaps; + + /** Once m_output.initial_segment_size and m_best_optimised_swaps have been + * filled, fill in the rest of the data in m_output and make the swap + * replacements in swap_list. + * @param initial_id The ID within the swap list of the first swap which may + * be replaced, where we begin optimisation. + * @param swap_list The sequence of swaps to be reduced, in-place. 
+ */ + void fill_final_output_and_swaplist(SwapID initial_id, SwapList& swap_list); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp new file mode 100644 index 0000000000..7a7532dd9a --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapListTableOptimiser.hpp @@ -0,0 +1,91 @@ + +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "PartialMappingLookup.hpp" +#include "SwapListSegmentOptimiser.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "VertexMapResizing.hpp" + +/// TODO: The swap table optimiser currently tries to optimise many segments; +/// solving ~2300 problems with Best TSA takes ~20 seconds, most of which +/// is the table optimisation part. +/// Certainly we can cut down the number of segments optimised; +/// needs experimentation. + +namespace tket { +namespace tsa_internal { + +/** Uses the lookup table to reduce many intervals of a swap sequence. */ +class SwapListTableOptimiser { + public: + /** Reduce the given list of swap in-place, by using the big lookup table. + * Swaps may be significantly reordered, and the final end-to-end + * permutation of vertices may change; only the partial mapping of those + * vertices with tokens is preserved. It's not actually clear what the best + * method is; experimentation is still needed. We can optimise any segment, + * i.e. between any two points. But then, which other segments should we + * choose? Should we overlap trial segments? Should we then combine with the + * simple SwapListOptimiser again? We are lacking a lot of theory to guide us. + * This pass will erase some empty swaps, but doesn't guarantee to find all + * (although in practice, it never does produce empty swaps, if they were + * previously well optimised with a swap list optimiser. Is this "luck", or is + * there a theoretical reason?) + * @param vertices_with_tokens_at_start Before we perform any swaps, which + * vertices have tokens on them? Other vertices are allowed to be moved around + * arbitrarily. + * @param map_resizing An object to take a VertexMapping and enlarge/contract + * it to give the desired number of vertices. So, this object knows about the + * edges in the graph. + * @param swap_list The sequence of swaps to be shortened. + * @param swap_list_optimiser An object to handle non-table optimisations. + * This is used only to do the basic passes needed to make the table effective + * (i.e., clustering interacting swaps together). + */ + void optimise( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser); + + /** For testing, give internal access to the segment optimiser. + * @return a reference to the internal segment optimiser object. 
+ */ + SwapListSegmentOptimiser& get_segment_optimiser(); + + private: + SwapListSegmentOptimiser m_segment_optimiser; + + /** The same interface as "optimise", which goes in both directions, + * and calls this function in a loop, repeatedly reversing and re-reversing + * the swap list to do both directions. A bit crude, but simple and not + * actually too inefficient. + * @param @param vertices_with_tokens_at_start Before we perform any swaps, + * which vertices have tokens on them? + * @param map_resizing An object to take a VertexMapping and enlarge/contract + * it to give the desired number of vertices. + * @param swap_list The sequence of swaps to be shortened. + * @param swap_list_optimiser An object to handle non-table optimisations. + */ + void optimise_in_forward_direction( + const std::set& vertices_with_tokens_at_start, + VertexMapResizing& map_resizing, SwapList& swap_list, + SwapListOptimiser& swap_list_optimiser); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp b/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp new file mode 100644 index 0000000000..3c9939c771 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/SwapSequenceTable.hpp @@ -0,0 +1,122 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +namespace tket { +namespace tsa_internal { + +/** For swaps on vertices {0,1,2,...,5}, return precomputed short swap + * sequences using given sets of edges. Should be close to optimal. + * (Every sequence should have the joint-shortest length amongst all sequences + * using those particular swaps, but not every possible sequence is included). + * + * (The only possibility of non-optimality is that some solutions + * using many edges might be missing. It was constructed using breadth-first + * searches of all possible sequences up to certain depths on various graphs + * with <= 6 vertices. Due to time/space limitations some non-complete graphs + * were searched as well as complete graphs K4, K5, K6. + * + * Note that, by token tracking, any swap sequence of n vertices of length + * > n(n-1)/2 can be reduced in length, so in fact any optimal swap sequence + * on n vertices has length <= n(n-1)/2, the number of edges in + * the complete graph K(n). + * + * Of course, ideally we'd search K6 up to depth 15, but searching up to depth 9 + * already consumed ~30 mins of CPU time and most of the memory capacity of an + * ordinary laptop. More efficient exhaustive search algorithms with clever + * pruning might cut it down a bit, but (since each added depth increases the + * difficulty roughly by a factor of 14) it would require significant + * computational effort to reach even depth 12 for K6, and depth 15 probably + * requires a supercomputer, or a very large distributed computation, + * or significantly more intelligent methods). 
+ * + * The table size is far smaller than the precomputation needed to create it. + * The creation considered millions of sequences, but the table has only a few + * thousand entries. + * + * The table currently contains ALL optimal swap sequences on <= 5 vertices, + * and also all swap sequences of length: + * <= 9 on 6 vertices (K6, depth 9); + * <= 12 on cycles with <= 6 vertices (C5, C6); + * <= 12 on a few other special graphs with 6 vertices. + * + * Superficially redundant solutions have been removed: + * + * (a): If sequences S1, S2 have equal length but the edges set E1 is a subset + * of E2, keep only S1, since every graph allowing S2 would also allow S1. + * + * (b): If sequences S1, S2 have len(S1) < len(S2), keep S2 exactly when E2 is + * NOT a subset of E1 (since, then there are graphs containing E2 which do NOT + * contain E1, so that S2 may be possible when S1 is impossible). + * + * Finally, to save space, every sequence was checked before insertion, and + * inserted ONLY if its inverse was not already present in the table (since + * inverting permutations is trivial for swaps: just reverse the order). Hence, + * the table is only about half the size that it would otherwise be. + * + * But, whilst these sequences are universally valid, + * this class knows nothing about HOW to look up results in the table + * efficiently. The current lookup algorithms are quite crude (but actually + * faster than fancier algorithms for this table size), but there is some + * possibility of speedup (although not result improvements) if a really fancy + * search/filtering algorithm can be found. + * + * NOTE: the format is reasonable, but still not as compressed as possible; + * it still contains multiple isomorphic entries. A more complicated hashing + * scheme is required to cut down on these isomorphic copies. (E.g., perm hash + * 2, meaning the mapping 0->1, 1->0, i.e. (01), contains 0x262, 0x484, 0x737, + * meaning swap sequences [02 12 02], [04 14 04], [13 03 13]. It is easily seen + * that all 3 are isomorphic. The first two are of the form [ab cb ab] == [ac], + * and the third has the form [ab cb ab] == [ca].) It seems like we'd need a + * scheme involving integer hashing of graphs, with few isomorphic collisions, + * but such algoritms need to be pretty simple and fast or they're not worth + * doing except for much larger table sizes. + */ +struct SwapSequenceTable { + /** The integer type used to encode a swap sequence on vertices {0,1,2,3,4,5}. + */ + typedef std::uint_fast64_t Code; + + /** The KEY is a "permutation hash", i.e. a number representing a permutation + * on {0,1,2,3,4,5}. (Not all possible permutations are represented, though; + * suitable vertex relabelling changes many different permutations to the same + * hash). + * + * See CanonicalRelabelling.hpp, SwapConversion.hpp for more explanation. + * + * The VALUE is a list of integers encoding a swap sequence, which all induce + * the permutation on {0,1,2,3,4,5} with the given hash. + * (Obviously, different sequences are allowed, because some swaps might not + * be possible, i.e. the graph might be incomplete). + */ + typedef std::map> Table; + + /** The actual large precomputed table. The entries are already sorted + * and duplications/redundancies/suboptimality have been removed. + * However, currently this raw data is processed by + * FilteredSwapSequences which tolerates such imperfections. 
+ * Thus it is easy to add more sequences to the table without worrying
+ * about them (as long as the newly added data is actually correct).
+ * @return A large precomputed raw table of data.
+ */
+ static Table get_table();
+};
+
+} // namespace tsa_internal
+} // namespace tket
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp b/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp
new file mode 100644
index 0000000000..bfa1f50b00
--- /dev/null
+++ b/tket/src/TokenSwapping/include/TokenSwapping/TrivialTSA.hpp
@@ -0,0 +1,221 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include
+
+#include "PartialTsaInterface.hpp"
+
+namespace tket {
+namespace tsa_internal {
+
+/** A full TSA, simple and fast but not giving very good solutions.
+ * This works by decomposing the desired mapping into abstract disjoint
+ * cycles, decomposing the abstract cycles into lists of abstract swaps,
+ * then finally decomposing the abstract swaps into concrete swaps.
+ * ("Abstract" means that the vertices involved are not necessarily
+ * adjacent, so the actual swaps cannot be calculated without knowing
+ * the graph, and "concrete" swaps are actual swaps between adjacent vertices).
+ * Because the ABSTRACT cycles are disjoint, we are free to perform them,
+ * as long as no other vertices are moved when doing so (they may be moved
+ * in intermediate steps, but will be moved back again by the end of each
+ * cycle). Thus we are guaranteed to get a full solution,
+ * although in tests it can easily give 20-30% more swaps than the best TSA.
+ */
+class TrivialTSA : public PartialTsaInterface {
+ public:
+ /** Extra options to control behaviour. */
+ enum class Options {
+ /** Run the algorithm to completion. */
+ FULL_TSA,
+
+ /** Start running the calculated swaps,
+ * but terminate as soon as nonzero L decrease occurs
+ * (which thus gives a Partial TSA).
+ */
+ BREAK_AFTER_PROGRESS
+ };
+
+ /** By default, it's a full TSA.
+ * @param options Option to set behaviour; by default, a full TSA.
+ */
+ explicit TrivialTSA(Options options = Options::FULL_TSA);
+
+ /** Set another option.
+ * @param options The option to be set from now on.
+ */
+ void set(Options options);
+
+ /** Calculate and append the complete solution (or break off early if
+ * BREAK_AFTER_PROGRESS was set). The point is that this partial TSA
+ * is not so good, but will be combined with other partial TSAs which
+ * are better, so we want to break off ASAP when progress occurs.
+ * @param swaps The list of swaps to append to.
+ * @param vertex_mapping The current desired mapping, will be updated.
+ * @param distances An object to calculate distances between vertices.
+ * @param path_finder An object to calculate a shortest path between any
+ * pair of vertices.
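+ *
+ * Illustrative, non-normative sketch (using the convenience overload
+ * declared below, which omits the unused NeighboursInterface argument;
+ * "swaps", "vertex_mapping", "distances" and "path_finder" are placeholder
+ * objects of the parameter types):
+ *
+ *   TrivialTSA tsa(TrivialTSA::Options::BREAK_AFTER_PROGRESS);
+ *   tsa.append_partial_solution(swaps, vertex_mapping, distances, path_finder);
+ *   // vertex_mapping now reflects the configuration reached after the
+ *   // appended swaps.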
+ */ + virtual void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, NeighboursInterface& /*not_needed*/, + RiverFlowPathFinder& path_finder) override; + + /** The same as the standard append_partial_solution interface, + * but without needing to pass in a NeighboursInterface. + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping, will be updated. + * @param distances An object to calculate distances between vertices. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. + */ + void append_partial_solution( + SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, RiverFlowPathFinder& path_finder); + + private: + // NOTE: the reason this is all a bit more complicated (and so, the word + // "trivial" is a bit unfair) is that we have to allow empty vertices. + // With full vertices (every vertex having a token), we can find cycles just + // by starting anywhere and going forwards until we hit the start again. + // But if some vertices can be empty, we may not be able to go forward + // once we hit an empty vertex, so we then have to go backwards also + // until we cannot anymore, and finally link the empty end with the nonempty + // start vertex to make a cycle. + // However, it's really just the same algorithm as the full tokens case. + + Options m_options; + + /** This will contain ALL relevant vertices for ALL cycles, but another + * object m_cycle_endpoints will store information about where + * each cycle starts and ends. + */ + VectorListHybrid m_abstract_cycles_vertices; + mutable std::set m_vertices_seen; + + typedef VectorListHybrid::ID ID; + + /** For an abstract cycle: the first is the ID of the start vertex in + * "m_abstract_cycles_vertices" (which already has a builtin linked list + * structure), the second is the final vertex. + */ + typedef std::pair Endpoints; + + /** Information about where each cycle starts and ends, + * using the vertices in m_abstract_cycles_vertices. + */ + std::vector m_cycle_endpoints; + std::vector m_vertices_work_vector; + + /** Fills m_abstract_cycles_vertices, m_cycle_endpoints with the cycles. + * @param vertex_mapping The current desired mapping. + */ + void fill_disjoint_abstract_cycles(const VertexMapping& vertex_mapping); + + /** Taking the given first element of "endpoints" as the start vertex, + * already known to be in "vertex_mapping", follow the arrows forwards + * until no more arrows exist, OR it wraps around to the first vertex, + * adding the vertices to "m_abstract_cycles_vertices" as we go, + * and updating "endpoints". Does NOT change m_vertices_seen. + * @param vertex_mapping The current desired mapping. + * @param endpoints The IDs of the vertex endpoints of the desired new cycle + * (but only the first ID is valid at the start; the second ID will be + * updated). + * @return TRUE if a cycle is found, FALSE if it ends at an empty vertex. + */ + bool grow_cycle_forwards( + const VertexMapping& vertex_mapping, Endpoints& endpoints); + + /** To be called immediately after grow_cycle_forwards, + * if the end vertex did NOT wrap around to the start vertex. + * So, go backwards from the start vertex until we cannot any more. + * (We can't hit the end vertex since it's empty, + * so no arrow can come from there). + * Update endpoints.first. + * Does NOT change m_vertices_seen. Uses m_reversed_vertex_mapping. 
+ * @param endpoints The IDs of the partial vertex cycle start and end + * vertices, to be updated (the end of the cycle must wrap round + * to the start; the start is not yet determined). + */ + void grow_cycle_backwards(Endpoints& endpoints); + + /** The ordinary vertex mapping is from v1 to v2, + * where v2 is the target of the token currently at v1. + * For this mapping, the key is v2, the value is v1. + */ + VertexMapping m_reversed_vertex_mapping; + + /** Checks validity/consistency of the data in m_abstract_cycles_vertices, + * m_cycle_endpoints, m_reversed_vertex_mapping and throws if invalid. + */ + void do_final_checks() const; + + /** Gets the vertices stored in order in m_abstract_cycles_vertices, + * given by the Endpoints, and copies them to m_vertices_work_vector. + * (Necessary because we need to do random access, which VectorListHybrid + * does not have). + * @param endpoints The IDs of the complete vertex cycle start and end, + * listed in order in m_abstract_cycles_vertices. + */ + void copy_vertices_to_work_vector(const Endpoints& endpoints); + + /** Once m_abstract_cycles_vertices and m_cycle_endpoints have been filled, + * append the complete solution. + * (We don't need to find distances any more, we need actual paths). + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping, will be updated. + * @param path_finder The object to calculate a shortest path between any + * pair of vertices. + */ + void append_partial_solution_with_all_cycles( + SwapList& swaps, VertexMapping& vertex_mapping, + RiverFlowPathFinder& path_finder); + + /** Perform the single abstract cycle, but breaking off as soon as + * the overall total home distance (L) decreases. + * (Every abstract cycle has strictly positive L-decrease, otherwise + * it wouldn't be included at all, so doing the whole thing must decrease L. + * But if we're lucky, we'll decrease L earlier). + * + * Note that we ALSO have to do some estimation, not only to choose + * which cycle is likely to be cheap, but ALSO to decide where to + * start from. (An ABSTRACT cycle [v0, v1, v2, ..., vn] is decomposed into + * ABSTRACT swaps (v0, v1).(v1,v2). ... .(v(n-1), vn), which omits the + * abstract swap (vn,v0), but we could have chosen any other v(i) to be + * the start vertex. Unlike for CONCRETE swaps, abstract swaps have + * different costs, so it's important to choose well). + * + * @param endpoints The IDs of the ends of the final cycle + * we've decided to use. + * @param start_v_index The starting index in the final cycle vertices, + * treating it logically as a vector. (The indices wrap round and reduce + * modulo the size). + * @param swaps The list of swaps to append to. + * @param vertex_mapping The current desired mapping, will be updated. + * @param distances An object to calculate distances between vertices. + * @param path_finder An object to calculate a shortest path between any + * pair of vertices. + * @return the actual L-decrease (will be strictly positive). + */ + size_t append_partial_solution_with_single_cycle( + const Endpoints& endpoints, size_t start_v_index, + // L (the sum of the distances to home) must decrease + // by at least this amount, to break off early. 
+ SwapList& swaps, VertexMapping& vertex_mapping, + DistancesInterface& distances, RiverFlowPathFinder& path_finder); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp new file mode 100644 index 0000000000..c604b1e2a9 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybrid.hpp @@ -0,0 +1,533 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "Utils/Assert.hpp" +#include "VectorListHybridSkeleton.hpp" + +namespace tket { + +struct OverwriteIntervalResult { + size_t number_of_overwritten_elements; + tsa_internal::VectorListHybridSkeleton::Index final_overwritten_element_id; +}; + +/** VectorListHybrid combines some functionality of std::vector + * and std::list, with the following goals: + * + * Objects are stored internally inside a std::vector. + * + * UNLIKE STL linked lists: erasure/insertion does NOT cause dynamic + * memory allocation/deallocation (except when more space + * is needed, in which case a vector reallocation takes place). + * + * All operations are O(1), except insertions which are amortised O(1) + * (because a vector reallocation may be needed for more storage space). + * + * Objects are not actually destroyed, they are merely marked for later reuse. + * Thus this class is good when objects are expensive to construct, + * but cheap to reuse and clear, and will be reused many times. + * (E.g., imagine a std::vector> being repeatedly resized; + * all those inner std::vector are repeatedly deallocated and reallocated). + * + * Objects can be accessed at any position, via an ID (like a vector index). + * + * Erasure/insertion does NOT invalidate other IDs, unless that element + * was erased (or the whole container cleared). + * + * NOTE: "previous" and "next" directions, by analogy with std::vector, + * correspond to the logical order the elements are regarded to have, + * AS IF they sat in a vector which we iterated through in forwards + * order (which, of course, is unrelated to where they are actually stored + * internally). Thus, "next", "forward" moves go towards the BACK; "previous", + * "backward" moves go towards the FRONT. This should not confuse if we remember + * std::vector itself, with begin() and rbegin() iterators. + * + * TODO: there are no O(log N) operations, and no checks for invalid indices. + * This could be achieved by wrapping this class and storing + * sets/maps of erased/inserted IDs, etc. etc. Then everything would become + * O(log N) or amortised O(log N) instead of O(1), but we'd also have + * complete checks. + * + * TODO: this class should have its own tests. Right now it is only used + * in other things (SwapListOptimiser) which do have end-to-end tests, + * so it's quite reliable but not as reliable as it could be. 
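+ *
+ * Illustrative, non-normative usage sketch (with T = unsigned, purely for
+ * concreteness):
+ *
+ *   VectorListHybrid<unsigned> list;
+ *   list.push_back(10);
+ *   list.push_back(20);
+ *   list.push_back(30);
+ *   auto id = list.front_id();  // ID of the element holding 10
+ *   id = list.next(id);         // ID of the element holding 20
+ *   list.erase(id.value());     // the elements 10 and 30 keep their IDs
+ *   // list.to_vector() now returns {10, 30}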
+ * + * TODO: Once this is well tested, move it to Utils for wider use. + */ +template +class VectorListHybrid { + public: + /** NOTE: the ID is NOT necessarily an actual vector index; + * that's an implementation detail. + */ + typedef tsa_internal::VectorListHybridSkeleton::Index ID; + + VectorListHybrid(); + + /** Returns an ID which is guaranteed NEVER to be valid. + * @return an ID value guaranteed NEVER to be valid. + */ + static ID get_invalid_id(); + + /** Logical clear: doesn't actually delete the elements, + * just relabels them for reuse. Time O(N). + * After this, all data - even IDs - will behave AS IF + * it were a new object. + */ + void clear(); + + /** Logical clear: doesn't actually delete the elements, + * just relabels them for reuse. Time O(1). + * After calling this function, IDs related to + * inserting/erasing elements may be different from + * those which would be obtained by the same sequence + * of operations on a new object. + */ + void fast_clear(); + + /** Like std::reverse, reverses the (logical) order of the elements. (Not the + * physical order: the internal vector of T objects is unchanged, only the + * links are changed). Existing ids may be invalidated. Time O(n). + */ + void reverse(); + + bool empty() const; + + /** The number of valid elements stored (not, of course, the actual + * internal number of elements, which is larger if some are waiting + * to be reused). + * @return The number of active elements stored. + */ + size_t size() const; + + /** Exactly like std::vector push_back. Fine if T is lightweight. + * Otherwise, maybe better to reuse elements. + * @param elem The T object to be copied and stored. + */ + void push_back(const T& elem); + + /** Like push_back, creates a new element after the current back, + * but returns the ID for the new element (which of course might not + * really be new; it is for reuse - it may be an old T object). + * Of course the returned ID is the same as would be obtained + * from back_id(). + * @return The ID of the newly created (or reused) element. + */ + ID emplace_back(); + + /** Erase the element at the back, but no checks for validity. */ + void pop_back(); + + /** Like push_back, but instead inserts the new element before + * the existing front element (so that it becomes the new front). + * @param elem The T object to be copied and stored. + */ + void push_front(const T& elem); + + /** Like emplace_back(), but creates the new element at the front, + * like push_front. However, returns the ID of the new object + * at the front. + * @return The ID of the newly created (or reused) element, at the front. + */ + ID emplace_front(); + + /** Erase the element at the front, but no checks for validity. */ + void pop_front(); + + /** Creates a new element after the existing one (not checked). + * @param id The ID of an existing element. + * @return The ID of the new element, inserted immediately after + * (i.e., "next"; towards the BACK) of the given element. + */ + ID insert_after(ID id); + + /** Creates a new element before the existing one (not checked). + * @param id The ID of an existing element. + * @return The ID of the new element, inserted immediately before + * (i.e., "previous"; towards the FRONT) of the given element. + */ + ID insert_before(ID id); + + /** Just like std::vector back(). + * Retrieve the element for reuse; must exist! + * @return A reference to the existing element at the back. + */ + T& back(); + + /** Retrieve the element for reuse; must exist! 
+ * @return A reference to the existing element at the front. + */ + T& front(); + + /** Retrieve the stored element at the existing ID (not checked!) + * @param id The ID of an existing element. + * @return A reference to the element. + */ + T& at(ID id); + + /** Retrieve the stored element at the existing ID (not checked!) + * @param id The ID of an existing element. + * @return A reference to the element. + */ + const T& at(ID) const; + + /** Get the element ID after the given one (which MUST be valid), + * or a null ID if we're already at the back. + * @param id The ID of an existing element. + * @return The ID of the element after it (towards the BACK), + * or null if it doesn't exist. + */ + std::optional next(ID id) const; + + /** Get the ID of the element after the given one. + * @param id The ID of an existing element, OR null if none exists. + * @return The ID of the element after it (towards the BACK), + * OR null if it doesn't exist, or no ID was specified. + */ + std::optional next(std::optional id) const; + + /** Like next. Get the element ID before the given one (which MUST be valid), + * or a null ID if we're already at the front. + * @param id The ID of an existing element. + * @return The ID of the element before it (towards the FRONT), + * or null if it doesn't exist. + */ + std::optional previous(ID id) const; + + /** The ID of the back() element, if it exists. + * @return The ID of the element at back(), or null if there is none. + */ + std::optional back_id() const; + + /** The ID of the front() element, if it exists. + * @return The ID of the element at front(), or null if there is none. + */ + std::optional front_id() const; + + /** Erase the element with that ID, whilst updating other links + * (the ID must actually exist). + * @param id The ID of the existing element to erase. + */ + void erase(ID id); + + /** Starting with the given ID, erase the given number of elements. + * Equivalent to looping with erase() and next(), but more efficient. + * The list MUST contain enough elements to erase. + * @param id The ID of the initial existing element to erase. Must be valid. + * @param number_of_elements The number of elements to erase. The list MUST + * contain enough elements to be erased. + */ + void erase_interval(ID id, size_t number_of_elements); + + /** Starting with the given ID, and given cbegin, cend iterators to a + * container of T objects, overwrite whatever T objects are currently stored + * in the list with the new T objects. The list MUST be big enough to allow + * overwriting all of them. The container of T objects MUST be nonempty. + * @param id The ID of the initial existing T element to overwrite. Must be + * valid. + * @param new_elements_cbegin Const iterator to the start of a sequence of new + * T elements. + * @param new_elements_cend Const iterator to the cend of a sequence of new T + * elements. + * @return The ID of the last T element that was overwritten; MUST be valid! + */ + template + OverwriteIntervalResult overwrite_interval( + ID id, const CIter& new_elements_cbegin, const CIter& new_elements_cend); + + /** Returns an ordinary vector of the data (in the correct order, + * maybe not the same as the internal storage order of course). + * @return A copy of the valid T objects stored, in the correct LOGICAL + * order, AS IF they had been inserted into a vector object throughout. + * (Of course, probably not the same as the actual storage order). 
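+ *
+ * Illustrative example (with T = int, purely for concreteness): after
+ * push_back(1); push_back(2); push_front(0); the call to_vector() returns
+ * {0, 1, 2}, whatever the internal storage order happens to be.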
+ */ + std::vector to_vector() const; + + /** Doesn't clear the vector, but copies all elements to the end of it. + * @param vect A vector, which will have all the valid elements in this + * object pushed back to it. + */ + void append_to_vector(std::vector& vect) const; + + /** Only for debugging purposes. + * @return A string giving further details of the internal data. + */ + std::string debug_str() const; + + private: + tsa_internal::VectorListHybridSkeleton m_links_data; + + /// The actual stored elements. + std::vector m_data; + + /** Returns the ID if valid, or null if not. + * @param id An ID, maybe invalid. + * @return The ID again, if valid, or null if not. + */ + static std::optional optional_id(ID id); + + /** Checks if m_data is big enough for the ID (which is really an index, + * returned by m_links_data). If not, resizes m_data if necessary, + * and just returns the ID unchanged. + * @param id An ID, valid for m_links_data, but maybe not for m_data. + * @return The passed in ID, but now definitely valid. + */ + ID get_checked_new_id(ID id); + + /** The list must currently be empty (but not checked). Creates a new + * element, resizes m_data if necessary, and returns the ID. + * @return The ID of the newly created (or reused) element. + */ + ID insert_for_empty_list(); +}; + +template +VectorListHybrid::VectorListHybrid() {} + +template +typename VectorListHybrid::ID VectorListHybrid::get_invalid_id() { + return tsa_internal::VectorListHybridSkeleton::get_invalid_index(); +} + +template +std::optional::ID> +VectorListHybrid::optional_id(ID id) { + if (id == tsa_internal::VectorListHybridSkeleton::get_invalid_index()) { + return {}; + } + return id; +} + +template +void VectorListHybrid::clear() { + m_links_data.clear(); +} + +template +void VectorListHybrid::fast_clear() { + m_links_data.fast_clear(); +} + +template +void VectorListHybrid::reverse() { + m_links_data.reverse(); +} +template +bool VectorListHybrid::empty() const { + return m_links_data.size() == 0; +} + +template +size_t VectorListHybrid::size() const { + return m_links_data.size(); +} + +template +void VectorListHybrid::push_back(const T& elem) { + emplace_back(); + back() = elem; +} + +template +typename VectorListHybrid::ID VectorListHybrid::emplace_back() { + if (empty()) { + insert_for_empty_list(); + } else { + insert_after(m_links_data.back_index()); + } + return m_links_data.back_index(); +} + +template +void VectorListHybrid::pop_back() { + erase(m_links_data.back_index()); +} + +template +void VectorListHybrid::push_front(const T& elem) { + emplace_front(); + front() = elem; +} + +template +typename VectorListHybrid::ID VectorListHybrid::emplace_front() { + if (empty()) { + insert_for_empty_list(); + } else { + insert_before(m_links_data.front_index()); + } + return m_links_data.front_index(); +} + +template +void VectorListHybrid::pop_front() { + erase(m_links_data.front_index()); +} + +template +typename VectorListHybrid::ID VectorListHybrid::insert_for_empty_list() { + m_links_data.insert_for_empty_list(); + return get_checked_new_id(m_links_data.front_index()); +} + +template +typename VectorListHybrid::ID VectorListHybrid::insert_after( + VectorListHybrid::ID id) { + m_links_data.insert_after(id); + return get_checked_new_id(m_links_data.next(id)); +} + +template +typename VectorListHybrid::ID VectorListHybrid::insert_before( + VectorListHybrid::ID id) { + m_links_data.insert_before(id); + return get_checked_new_id(m_links_data.previous(id)); +} + +template +T& VectorListHybrid::back() { + return 
m_data[m_links_data.back_index()]; +} + +template +T& VectorListHybrid::front() { + return m_data[m_links_data.front_index()]; +} + +template +T& VectorListHybrid::at(ID id) { + return m_data[id]; +} + +template +const T& VectorListHybrid::at(ID id) const { + return m_data[id]; +} + +template +std::optional::ID> VectorListHybrid::next( + ID id) const { + const ID index = m_links_data.next(id); + return optional_id(index); +} + +template +std::optional::ID> VectorListHybrid::next( + std::optional id) const { + return next(id.value()); +} + +template +std::optional::ID> VectorListHybrid::previous( + ID id) const { + return optional_id(m_links_data.previous(id)); +} + +template +std::optional::ID> VectorListHybrid::back_id() + const { + return optional_id(m_links_data.back_index()); +} + +template +std::optional::ID> VectorListHybrid::front_id() + const { + return optional_id(m_links_data.front_index()); +} + +template +void VectorListHybrid::erase(ID id) { + m_links_data.erase(id); +} + +template +void VectorListHybrid::erase_interval( + typename VectorListHybrid::ID id, size_t number_of_elements) { + m_links_data.erase_interval(id, number_of_elements); +} + +template +template +OverwriteIntervalResult VectorListHybrid::overwrite_interval( + typename VectorListHybrid::ID id, const CIter& new_elements_cbegin, + const CIter& new_elements_cend) { + // The links are unchanged; only the elements need to be changed. + OverwriteIntervalResult result; + result.final_overwritten_element_id = id; + CIter citer = new_elements_cbegin; + TKET_ASSERT(citer != new_elements_cend); + const auto max_number_of_elements = m_links_data.size(); + result.number_of_overwritten_elements = 0; + for (;;) { + m_data.at(result.final_overwritten_element_id) = *citer; + ++result.number_of_overwritten_elements; + // GCOVR_EXCL_START + TKET_ASSERT( + result.number_of_overwritten_elements <= max_number_of_elements); + // GCOVR_EXCL_STOP + ++citer; + if (citer == new_elements_cend) { + return result; + } + // There IS another element, where will it be overwritten? 
+ result.final_overwritten_element_id = + m_links_data.next(result.final_overwritten_element_id); + } + // Should be impossible to reach here + TKET_ASSERT(false); +} + +template +void VectorListHybrid::append_to_vector(std::vector& vect) const { + vect.reserve(vect.size() + size()); + for (ID current_index = m_links_data.front_index(); + current_index != m_links_data.get_invalid_index(); + current_index = m_links_data.next(current_index)) { + vect.emplace_back(m_data[current_index]); + } +} + +template +std::vector VectorListHybrid::to_vector() const { + std::vector result; + append_to_vector(result); + return result; +} + +template +typename VectorListHybrid::ID VectorListHybrid::get_checked_new_id( + ID id) { + if (m_data.size() <= id) { + m_data.resize(id + 1); + } + return id; +} + +template +std::string VectorListHybrid::debug_str() const { + std::stringstream ss; + ss << "\nRaw stored elems:"; + for (size_t nn = 0; nn < m_data.size(); ++nn) { + ss << "\nData[" << nn << "] = " << m_data[nn]; + } + ss << "\n" << m_links_data.debug_str() << "\n"; + return ss.str(); +} + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp new file mode 100644 index 0000000000..e8b3b64fde --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/VectorListHybridSkeleton.hpp @@ -0,0 +1,165 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +namespace tket { +namespace tsa_internal { + +/** This contains only support data and algorithms for VectorListHybrid, + * a data structure combining features of std::vector and linked lists. + * No checks for invalidated or never valid indices. + * This keeps track of indices for a std::vector of data, + * without actually holding any other data itself. + * Throughout, "after", "before", "next", "previous", "front", "back" + * refer to the logical ordering, AS IF elements were being inserted into + * and erased from a std::vector, but NOT the actual order in which elements + * are stored in an actual implementation (like VectorListHybrid). + * Erased elements are not actually erased, they are reused. + */ +class VectorListHybridSkeleton { + public: + /** Represents actual indices for a std::vector, which SHOULD store + * the objects we care about (templated on the object type; but this + * class stores no data except indexing information). + */ + typedef size_t Index; + + VectorListHybridSkeleton(); + + /** "Null" indices will always be represented by this value. + * @return An index value which is guaranteed NEVER to be valid. + */ + static Index get_invalid_index(); + + /** Indices will be valid until that element is erased, + * or clear() is called, regardless of other insertions/erasures. + * A "logical" clear; does not actually clear any data, + * "erased" elements will be reused. 
+ * But, this is time O(n) because existing internal links will be + * reset to default values. + */ + void clear(); + + /** Time O(1), does not erase internal link indices. Identical erase/insert + * calls after fast_clear() calls (i.e., respecting the ordering, but + * ignoring the internal indices) will result in the same logical list, + * BUT the returned Index values may be different. + */ + void fast_clear(); + + /** Reverses the logical order of the elements. Time O(n). */ + void reverse(); + + /** The number of elements currently stored; + * NOT equal to the underlying vector size! + * @return The number of valid elements stored. + */ + size_t size() const; + + /** The index of the front element (or the same index as returned by + * get_invalid_index() if currently empty). + * @return The index of the front element. + */ + Index front_index() const; + + /** The index of the back element (or the same index as returned by + * get_invalid_index() if currently empty). + * @return The index of the back element. + */ + Index back_index() const; + + // All input indices MUST be currently valid, + // but this is not checked. (Checking would need O(log N) time, + // since we'd have to use maps and sets). + + /** The index of the next element after the given one. + * @param index The index of a valid element (not checked). + * @return The index of the next element (or the same index as returned by + * get_invalid_index() if no next element exists). + */ + Index next(Index index) const; + + /** The index of the previous element before the given one. + * @param index The index of a valid element (not checked). + * @return The index of the previous element (or the same index as returned + * by get_invalid_index() if no previous element exists). + */ + Index previous(Index index) const; + + /** "Logical" erase of the element (the position is marked for reuse). + * @param index The index of a valid element (not checked). + */ + void erase(Index index); + + /** Logical erase of an interval of linked elements (a, next(a), + * next(next(a)), ...). Equivalent to looping with erase() and next(), but + * more efficient. The list MUST contain enough elements to erase. + * @param index The index of a valid element to start erasing at (not + * checked). + * @param number_of_elements Number of elements to erase; these MUST exist + * (the list must be big enough). + */ + void erase_interval(Index index, size_t number_of_elements); + + /** The list must currently be empty, but not checked. */ + void insert_for_empty_list(); + + /** Insert a new element after the existing one. + * @param index The index of a valid element (not checked). + */ + void insert_after(Index index); + + /** Insert a new element before the existing one. + * @param index The index of a valid element (not checked). + */ + void insert_before(Index index); + + /** A platform-independent string which can be copied into tests. + * @return A string representing the current data, useful for testing. + */ + std::string debug_str() const; + + private: + struct Link { + Index previous; + Index next; + }; + + std::vector m_links; + size_t m_size; + Index m_front; + Index m_back; + + // Deleted elements will form a second linked list for reuse + // inside the data. TRICK: forward list only, + // no need for doubly linked lists. + Index m_deleted_front; + + /** Resizes m_links if necessary to ensure that the new index + * is valid (but will reuse erased elements if possible). 
+ * However, DOESN'T set the "previous" and "next" data;
+ * the caller must do that (depending on what they're doing);
+ * thus, it's initially an "orphan" link.
+ * @return A valid index for a new Link object (but with unset fields).
+ */
+ Index get_new_index();
+};
+
+} // namespace tsa_internal
+} // namespace tket
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp
new file mode 100644
index 0000000000..d1e8677b42
--- /dev/null
+++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMapResizing.hpp
@@ -0,0 +1,120 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include "TokenSwapping/NeighboursInterface.hpp"
+#include "TokenSwapping/VertexMappingFunctions.hpp"
+
+namespace tket {
+namespace tsa_internal {
+
+/** If a vertex mapping { u -> v } has too few vertices, try to add extra
+ * vertices, fixed by the new mapping, to get to the desired size. This may
+ * allow extra optimisations to be found in the table. E.g., imagine a vertex in
+ * a graph which is not moved by the mapping. Imagine that removing it makes the
+ * graph disconnected. If the desired mapping moves a token
+ * between different components, it is then impossible for any swap
+ * sequence within the subgraph to perform that mapping.
+ * However, adding the vertex back makes it possible.
+ *
+ * If instead there are too many vertices to look up in the table, it tries
+ * to remove vertices which are fixed by the mapping to get it down to size.
+ */
+class VertexMapResizing : public NeighboursInterface {
+ public:
+ /** Store a Neighbours object, to be used throughout when required to find
+ * all neighbours of a given vertex. The caller must ensure that the
+ * object remains valid.
+ * @param neighbours The object to calculate neighbours of a vertex.
+ */
+ explicit VertexMapResizing(NeighboursInterface& neighbours);
+
+ /** Gets the data by calling the NeighboursInterface object which was passed
+ * into the constructor. HOWEVER, it does internal caching, so it doesn't
+ * call it multiple times.
+ * @param vertex A vertex in the graph.
+ * @return A cached list of neighbours of that vertex, stored internally.
+ */
+ virtual const std::vector& operator()(size_t vertex) override;
+
+ /** The result of resizing a mapping by deleting fixed vertices if too big,
+ * or adding new vertices if too small.
+ */
+ struct Result {
+ /** It is still a success if we have fewer vertices than the desired number
+ * (as this can still be looked up in the table). However, it's a failure if
+ * there are too many vertices (which then cannot be looked up).
+ */
+ bool success;
+
+ /** If successful, the edges of the subgraph containing only the vertices in
+ * the new mapping.
+ */
+ std::vector edges;
+ };
+
+ /** The mapping may be altered, even upon failure, so obviously the caller
+ * should make a copy if it needs to be preserved. Increase the map size as
+ * much as possible if too small (still a success even if it cannot reach the
+ * size). Decrease the size if too large (and not reaching the size is then a
+ * failure). Newly added or removed vertices are all fixed, i.e. map[v]=v.
+ * @param mapping The mapping which will be altered and returned by reference.
+ * @param desired_size The size we wish to reach, or as close as possible if
+ * the mapping is currently too small.
+ */
+ const Result& resize_mapping(
+ VertexMapping& mapping, unsigned desired_size = 6);
+
+ private:
+ NeighboursInterface& m_neighbours;
+ Result m_result;
+
+ // KEY: a vertex. VALUE: all its neighbours.
+ std::map> m_cached_neighbours;
+ std::set m_cached_full_edges;
+
+ /** How many edges join the given vertex to other existing vertices?
+ * @param mapping The current vertex permutation which we may expand or
+ * contract.
+ * @param vertex A vertex which may or may not be already within the mapping.
+ * @return The total number of edges within the LARGER graph joining the
+ * vertex to other vertices within the mapping.
+ */
+ size_t get_edge_count(const VertexMapping& mapping, size_t vertex);
+
+ /** Try to add a single new fixed vertex to the mapping, i.e. a new v with
+ * map[v]=v.
+ * @param mapping The current vertex permutation which we wish to expand by
+ * one vertex.
+ */
+ void add_vertex(VertexMapping& mapping);
+
+ /** Try to remove a single vertex within the mapping, but only if it is fixed,
+ * i.e. map[v]==v.
+ * @param mapping The current vertex permutation which we wish to shrink by
+ * one vertex.
+ */
+ void remove_vertex(VertexMapping& mapping);
+
+ /** Within the m_result object, fill "edges" for the new mapping. */
+ void fill_result_edges(const VertexMapping& mapping);
+};
+
+} // namespace tsa_internal
+} // namespace tket
diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp
new file mode 100644
index 0000000000..1226c64acf
--- /dev/null
+++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexMappingFunctions.hpp
@@ -0,0 +1,84 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include
+#include
+#include
+
+#include "SwapFunctions.hpp"
+
+namespace tket {
+
+/// The desired result of swapping is to move a token on each "key"
+/// vertex to the "value" vertex.
+typedef std::map VertexMapping;
+
+/** Are all tokens on their target vertices?
+ * @param vertex_mapping The desired mapping.
+ * @return Whether all tokens are on their target vertices.
+ */
+bool all_tokens_home(const VertexMapping& vertex_mapping);
+
+/** Does nothing, except throwing if the mapping is invalid.
+ * @param vertex_mapping The desired mapping, to be checked.
+ */ +void check_mapping(const VertexMapping& vertex_mapping); + +/** When you've already got another expendable VertexMapping object, + * it saves time to reuse instead of constructing a new one. + * @param vertex_mapping The desired mapping, to be checked. + * @param work_mapping A disposable object, will be overwritten. + */ +void check_mapping( + const VertexMapping& vertex_mapping, VertexMapping& work_mapping); + +/** We have a path [v(1), v(2), v(3), ..., v(N)]. + * Calculate individual swaps along this path (i.e., using only + * Swap(v(i), v(i+1)) which we know are valid), which would swap the tokens + * (if any) on v(1), v(N), and perform the swaps. + * Only append nonempty swaps (i.e., where at least one token is moved). + * @param path The path (must be an actual possible path), whose start + * and end vertices are to be swapped (with all other vertices) + * @param vertex_mapping The source to target mapping, which will be updated. + * @param swap_list The list of swaps, which will be updated. + */ +void append_swaps_to_interchange_path_ends( + const std::vector& path, VertexMapping& vertex_mapping, + SwapList& swap_list); + +/** Given a source->target vertex mapping and a TARGET vertex, find the + * corresponding source vertex. If the given target vertex does not appear in + * the map, create it as a new fixed vertex, i.e. map[v] = v for the given + * target vertex v. + * @param source_to_target_map A source->target vertex mapping. + * @param target_vertex A target vertex, to find in the map. + * @return The source vertex corresponding to the target (possibly newly created + * if the target was not present). + */ +size_t get_source_vertex( + VertexMapping& source_to_target_map, size_t target_vertex); + +/** We currently have a source->target mapping. Perform the vertex swap, + * but if any vertex in the swap is not present in the map, add it to the map as + * a new source vertex. + * Note that, since we DON'T have a target->source map, we have to do an O(N) + * search to find all target vertices. + * @param source_to_target_map The map to update with the swap. + * @param swap The swap to perform. + */ +void add_swap(VertexMapping& source_to_target_map, const Swap& swap); + +} // namespace tket diff --git a/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp b/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp new file mode 100644 index 0000000000..a8742a2670 --- /dev/null +++ b/tket/src/TokenSwapping/include/TokenSwapping/VertexSwapResult.hpp @@ -0,0 +1,59 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** For performing a vertex swap, and checking how many tokens moved. */ +struct VertexSwapResult { + /** How many tokens moved? Must be one of 0,1,2. */ + unsigned tokens_moved; + + /** Carry out the swap on the tokens and get the result. 
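+ * (A "token" here is an entry of the source->target mapping; tokens_moved
+ * counts how many of the two swapped vertices currently hold a token.)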
+ * @param swap The swap to perform. + * @param vertex_mapping The source to target mapping, + * will be updated with the swap. + */ + VertexSwapResult(const Swap& swap, VertexMapping& vertex_mapping); + + /** Pass in the two vertex size_t numbers directly. + * @param v1 First vertex of the swap to perform. + * @param v2 Second vertex of the swap to perform. + * @param vertex_mapping The source to target mapping, + * will be updated with the swap. + */ + VertexSwapResult(size_t v1, size_t v2, VertexMapping& vertex_mapping); + + /** If the swap moves at least one nonempty token, carry out the swap. + * Otherwise, does nothing. + * @param v1 First vertex of the swap to perform. + * @param v2 Second vertex of the swap to perform. + * @param vertex_mapping The source to target mapping, + * will be updated with the swap. + * @param swap_list The list of swaps, which will be updated with the swap. + */ + VertexSwapResult( + size_t v1, size_t v2, VertexMapping& vertex_mapping, SwapList& swap_list); +}; + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/src/Transformations/Combinator.cpp b/tket/src/Transformations/Combinator.cpp index d1ed6c9ebc..1abdd7d43d 100644 --- a/tket/src/Transformations/Combinator.cpp +++ b/tket/src/Transformations/Combinator.cpp @@ -14,6 +14,8 @@ #include "Combinator.hpp" +#include + #include "Transform.hpp" namespace tket { @@ -26,38 +28,38 @@ Transform operator>>(const Transform &lhs, const Transform &rhs) { namespace Transforms { Transform sequence(std::vector &tvec) { - return Transform([=](Circuit &circ) { + return Transform([=](Circuit &circ, std::shared_ptr maps) { bool success = false; for (std::vector::const_iterator it = tvec.begin(); it != tvec.end(); ++it) { - success = it->apply(circ) || success; + success = it->apply_fn(circ, maps) || success; } return success; }); } Transform repeat(const Transform &trans) { - return Transform([=](Circuit &circ) { + return Transform([=](Circuit &circ, std::shared_ptr maps) { bool success = false; - while (trans.apply(circ)) success = true; + while (trans.apply_fn(circ, maps)) success = true; return success; }); } Transform repeat_with_metric( const Transform &trans, const Transform::Metric &eval) { - return Transform([=](Circuit &circ) { + return Transform([=](Circuit &circ, std::shared_ptr maps) { bool success = false; int currentVal = eval(circ); Circuit *currentCircuit = ˆ Circuit newCircuit = circ; - trans.apply(newCircuit); + trans.apply_fn(newCircuit, maps); int newVal = eval(newCircuit); while (newVal < currentVal) { currentCircuit = &newCircuit; currentVal = newVal; success = true; - trans.apply(newCircuit); + trans.apply_fn(newCircuit, maps); newVal = eval(newCircuit); } if (&circ != currentCircuit) circ = *currentCircuit; @@ -66,11 +68,11 @@ Transform repeat_with_metric( } Transform repeat_while(const Transform &cond, const Transform &body) { - return Transform([=](Circuit &circ) { + return Transform([=](Circuit &circ, std::shared_ptr maps) { bool success = false; - while (cond.apply(circ)) { + while (cond.apply_fn(circ, maps)) { success = true; - body.apply(circ); + body.apply_fn(circ, maps); } return success; }); diff --git a/tket/src/Transformations/ControlledGates.cpp b/tket/src/Transformations/ControlledGates.cpp index 73bd2e0f2c..27fa953eb4 100644 --- a/tket/src/Transformations/ControlledGates.cpp +++ b/tket/src/Transformations/ControlledGates.cpp @@ -21,6 +21,7 @@ #include "Circuit/CircPool.hpp" #include "Circuit/DAGDefs.hpp" +#include "OpType/OpType.hpp" #include "Transform.hpp" #include 
"Utils/EigenConfig.hpp" #include "Utils/HelperFunctions.hpp" @@ -432,22 +433,25 @@ static Circuit lemma71( if (rep.n_gates() != correct_gate_count) throw ControlDecompError("Error in Lemma 7.1: Gate count is incorrect"); auto [vit, vend] = boost::vertices(rep.dag); + VertexSet bin; for (auto next = vit; vit != vend; vit = next) { ++next; Vertex v = *vit; - if (rep.get_OpType_from_Vertex(v) == OpType::CRy) { - Expr v_angle = rep.get_Op_ptr_from_Vertex(v)->get_params()[0]; - Circuit cry_replacement = CircPool::CRy_using_CX(v_angle); - Subcircuit sub{rep.get_in_edges(v), rep.get_all_out_edges(v), {v}}; - rep.substitute(cry_replacement, sub, Circuit::VertexDeletion::Yes); - } - if (rep.get_OpType_from_Vertex(v) == OpType::CU1) { - Expr v_angle = rep.get_Op_ptr_from_Vertex(v)->get_params()[0]; - Circuit cu1_replacement = CircPool::CU1_using_CX(v_angle); - Subcircuit sub{rep.get_in_edges(v), rep.get_all_out_edges(v), {v}}; - rep.substitute(cu1_replacement, sub, Circuit::VertexDeletion::Yes); + if (!bin.contains(v)) { + OpType optype = rep.get_OpType_from_Vertex(v); + if (optype == OpType::CRy || optype == OpType::CU1) { + Expr v_angle = rep.get_Op_ptr_from_Vertex(v)->get_params()[0]; + Circuit replacement = (optype == OpType::CRy) + ? CircPool::CRy_using_CX(v_angle) + : CircPool::CU1_using_CX(v_angle); + Subcircuit sub{rep.get_in_edges(v), rep.get_all_out_edges(v), {v}}; + rep.substitute(replacement, sub, Circuit::VertexDeletion::No); + bin.insert(v); + } } } + rep.remove_vertices( + bin, Circuit::GraphRewiring::No, Circuit::VertexDeletion::Yes); return rep; } diff --git a/tket/src/Transformations/Decomposition.cpp b/tket/src/Transformations/Decomposition.cpp index 21b39e1eb4..511bd5caf9 100644 --- a/tket/src/Transformations/Decomposition.cpp +++ b/tket/src/Transformations/Decomposition.cpp @@ -240,7 +240,8 @@ Transform decompose_tk1_to_rzrx() { success = true; const Op_ptr g = circ.get_Op_ptr_from_Vertex(*it); const std::vector ¶ms = g->get_params(); - Circuit newcirc = tk1_to_rzrx(params[0], params[1], params[2]); + Circuit newcirc = + CircPool::tk1_to_rzrx(params[0], params[1], params[2]); Subcircuit sc = { {circ.get_in_edges(*it)}, {circ.get_all_out_edges(*it)}, {*it}}; circ.substitute(newcirc, sc, Circuit::VertexDeletion::Yes); @@ -746,6 +747,17 @@ Transform decomp_boxes() { Transform compose_phase_poly_boxes() { return Transform([](Circuit &circ) { + // replace wireswaps with three CX + while (circ.has_implicit_wireswaps()) { + qubit_map_t perm = circ.implicit_qubit_permutation(); + for (const std::pair &pair : perm) { + if (pair.first != pair.second) { + circ.replace_implicit_wire_swap(pair.first, pair.second); + break; + } + } + } + CircToPhasePolyConversion conv = CircToPhasePolyConversion(circ); conv.convert(); circ = conv.get_circuit(); diff --git a/tket/src/Transformations/OptimisationPass.cpp b/tket/src/Transformations/OptimisationPass.cpp index f4bcde28d2..fa9cbe65c0 100644 --- a/tket/src/Transformations/OptimisationPass.cpp +++ b/tket/src/Transformations/OptimisationPass.cpp @@ -15,6 +15,7 @@ #include "OptimisationPass.hpp" #include "BasicOptimisation.hpp" +#include "Circuit/CircPool.hpp" #include "Circuit/CircUtils.hpp" #include "CliffordOptimisation.hpp" #include "CliffordReductionPass.hpp" @@ -136,8 +137,9 @@ Transform synthesise_UMD() { OpType type = op_ptr->get_type(); if (type == OpType::TK1) { std::vector tk1_angles = as_gate_ptr(op_ptr)->get_tk1_angles(); - Circuit in_circ = - tk1_to_PhasedXRz(tk1_angles[0], tk1_angles[1], tk1_angles[2]); + Circuit in_circ = 
CircPool::tk1_to_PhasedXRz( + tk1_angles[0], tk1_angles[1], tk1_angles[2]); + remove_redundancies().apply(in_circ); Subcircuit sub = { {circ.get_in_edges(v)}, {circ.get_all_out_edges(v)}, {v}}; bin.push_back(v); diff --git a/tket/src/Transformations/Rebase.cpp b/tket/src/Transformations/Rebase.cpp index 9ede90b192..fedb1903f9 100644 --- a/tket/src/Transformations/Rebase.cpp +++ b/tket/src/Transformations/Rebase.cpp @@ -27,25 +27,24 @@ namespace tket { namespace Transforms { static bool standard_rebase( - Circuit& circ, const OpTypeSet& multiqs, const Circuit& cx_replacement, - const OpTypeSet& singleqs, + Circuit& circ, const OpTypeSet& allowed_gates, + const Circuit& cx_replacement, const std::function& tk1_replacement); Transform rebase_factory( - const OpTypeSet& multiqs, const Circuit& cx_replacement, - const OpTypeSet& singleqs, + const OpTypeSet& allowed_gates, const Circuit& cx_replacement, const std::function& tk1_replacement) { return Transform([=](Circuit& circ) { return standard_rebase( - circ, multiqs, cx_replacement, singleqs, tk1_replacement); + circ, allowed_gates, cx_replacement, tk1_replacement); }); } static bool standard_rebase( - Circuit& circ, const OpTypeSet& multiqs, const Circuit& cx_replacement, - const OpTypeSet& singleqs, + Circuit& circ, const OpTypeSet& allowed_gates, + const Circuit& cx_replacement, const std::function& tk1_replacement) { bool success = false; @@ -60,7 +59,7 @@ static bool standard_rebase( op = cond.get_op(); } OpType type = op->get_type(); - if (multiqs.find(type) != multiqs.end() || type == OpType::CX || + if (allowed_gates.find(type) != allowed_gates.end() || type == OpType::CX || type == OpType::Barrier) continue; // need to convert @@ -73,14 +72,12 @@ static bool standard_rebase( bin.push_back(v); success = true; } - if (multiqs.find(OpType::CX) == multiqs.end()) { + if (allowed_gates.find(OpType::CX) == allowed_gates.end()) { const Op_ptr cx_op = get_op_ptr(OpType::CX); success = circ.substitute_all(cx_replacement, cx_op) | success; } BGL_FORALL_VERTICES(v, circ.dag, DAG) { - if (circ.n_in_edges_of_type(v, EdgeType::Quantum) != 1 || - circ.n_in_edges_of_type(v, EdgeType::Quantum) != 1) - continue; + if (circ.n_in_edges_of_type(v, EdgeType::Quantum) != 1) continue; Op_ptr op = circ.get_Op_ptr_from_Vertex(v); bool conditional = op->get_type() == OpType::Conditional; if (conditional) { @@ -89,12 +86,13 @@ static bool standard_rebase( } OpType type = op->get_type(); if (!is_gate_type(type) || is_projective_type(type) || - singleqs.find(type) != singleqs.end()) + allowed_gates.find(type) != allowed_gates.end()) continue; // need to convert std::vector tk1_angles = as_gate_ptr(op)->get_tk1_angles(); Circuit replacement = tk1_replacement(tk1_angles[0], tk1_angles[1], tk1_angles[2]); + remove_redundancies().apply(replacement); if (conditional) { circ.substitute_conditional(replacement, v, Circuit::VertexDeletion::No); } else { @@ -116,188 +114,58 @@ Transform rebase_tket() { c.add_op(OpType::TK1, {alpha, beta, gamma}, {0}); return c; }; - return rebase_factory( - {OpType::CX}, CircPool::CX(), {OpType::TK1}, tk1_to_tk1); -} - -Circuit tk1_to_PhasedXRz( - const Expr& alpha, const Expr& beta, const Expr& gamma) { - Circuit c(1); - if (equiv_expr(beta, 1)) { - // Angles β ∈ {π, 3π} - c.add_op(OpType::PhasedX, {beta, (alpha - gamma) / 2.}, {0}); - } else if (equiv_expr(beta, 0)) { - // Angle β ∈ {0, 2π} - c.add_op(OpType::Rz, alpha + beta + gamma, {0}); - } else { - c.add_op(OpType::Rz, alpha + gamma, {0}); - c.add_op(OpType::PhasedX, {beta, alpha}, 
{0}); - } - remove_redundancies().apply(c); - return c; + return rebase_factory({OpType::CX, OpType::TK1}, CircPool::CX(), tk1_to_tk1); } Transform rebase_cirq() { return rebase_factory( - {OpType::CZ}, CircPool::H_CZ_H(), {OpType::PhasedX, OpType::Rz}, - tk1_to_PhasedXRz); -} - -Circuit tk1_to_rzrx(const Expr& alpha, const Expr& beta, const Expr& gamma) { - Circuit c(1); - c.add_op(OpType::Rz, gamma, {0}); - c.add_op(OpType::Rx, beta, {0}); - c.add_op(OpType::Rz, alpha, {0}); - remove_redundancies().apply(c); - return c; -} - -Circuit tk1_to_rzh(const Expr& alpha, const Expr& beta, const Expr& gamma) { - Circuit c(1); - std::optional cliff = equiv_Clifford(beta, 4); - if (cliff) { - switch (*cliff % 4) { - case 0: { - c.add_op(OpType::Rz, gamma + alpha, {0}); - break; - } - case 1: { - c.add_op(OpType::Rz, gamma - 0.5, {0}); - c.add_op(OpType::H, {0}); - c.add_op(OpType::Rz, alpha - 0.5, {0}); - c.add_phase(-0.5); - break; - } - case 2: { - c.add_op(OpType::Rz, gamma - alpha, {0}); - c.add_op(OpType::H, {0}); - c.add_op(OpType::Rz, 1., {0}); - c.add_op(OpType::H, {0}); - break; - } - case 3: { - c.add_op(OpType::Rz, gamma + 0.5, {0}); - c.add_op(OpType::H, {0}); - c.add_op(OpType::Rz, alpha + 0.5, {0}); - c.add_phase(-0.5); - break; - } - } - if (cliff >= 4u) c.add_phase(1.); - } else { - c.add_op(OpType::Rz, gamma, {0}); - c.add_op(OpType::H, {0}); - c.add_op(OpType::Rz, beta, {0}); - c.add_op(OpType::H, {0}); - c.add_op(OpType::Rz, alpha, {0}); - } - remove_redundancies().apply(c); - return c; -} - -static unsigned int_half(const Expr& angle) { - // Assume angle is an even integer - double eval = eval_expr(angle).value(); - return lround(eval / 2); -} - -Circuit tk1_to_rzsx(const Expr& alpha, const Expr& beta, const Expr& gamma) { - Circuit c(1); - Expr correction_phase = 0; - if (equiv_0(beta)) { - // b = 2k, if k is odd, then Rx(b) = -I - c.add_op(OpType::Rz, alpha + gamma, {0}); - correction_phase = int_half(beta); - } else if (equiv_0(beta + 1)) { - // Use Rx(2k-1) = i(-1)^{k}SxSx - correction_phase = -0.5 + int_half(beta - 1); - if (equiv_0(alpha - gamma)) { - // a - c = 2m - // overall operation is (-1)^{m}Rx(2k -1) - c.add_op(OpType::SX, {0}); - c.add_op(OpType::SX, {0}); - correction_phase += int_half(alpha - gamma); - } else { - c.add_op(OpType::Rz, gamma, {0}); - c.add_op(OpType::SX, {0}); - c.add_op(OpType::SX, {0}); - c.add_op(OpType::Rz, alpha, {0}); - } - } else if (equiv_0(beta - 0.5) && equiv_0(alpha) && equiv_0(gamma)) { - // a = 2k, b = 2m+0.5, c = 2n - // Rz(2k)Rx(2m + 0.5)Rz(2n) = (-1)^{k+m+n}e^{-i \pi /4} SX - c.add_op(OpType::SX, {0}); - correction_phase = - int_half(beta - 0.5) + int_half(alpha) + int_half(gamma) - 0.25; - } else if (equiv_0(alpha - 0.5) && equiv_0(gamma - 0.5)) { - // Rz(2k + 0.5)Rx(b)Rz(2m + 0.5) = -i(-1)^{k+m}SX.Rz(1-b).SX - c.add_op(OpType::SX, {0}); - c.add_op(OpType::Rz, 1 - beta, {0}); - c.add_op(OpType::SX, {0}); - correction_phase = int_half(alpha - 0.5) + int_half(gamma - 0.5) - 0.5; - } else { - c.add_op(OpType::Rz, gamma + 0.5, {0}); - c.add_op(OpType::SX, {0}); - c.add_op(OpType::Rz, beta - 1, {0}); - c.add_op(OpType::SX, {0}); - c.add_op(OpType::Rz, alpha + 0.5, {0}); - correction_phase = -0.5; - } - c.add_phase(correction_phase); - remove_redundancies().apply(c); - return c; -} - -Circuit tk1_to_tk1(const Expr& alpha, const Expr& beta, const Expr& gamma) { - Circuit c(1); - c.add_op(OpType::TK1, {alpha, beta, gamma}, {0}); - return c; + {OpType::CZ, OpType::PhasedX, OpType::Rz}, CircPool::H_CZ_H(), + CircPool::tk1_to_PhasedXRz); } 
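+
+// For illustration only (hypothetical "rebase_custom", not defined in tket):
+// with the single merged allowed_gates set, a backend rebase is now declared
+// in one call, e.g.
+//
+//   Transform rebase_custom() {
+//     return rebase_factory(
+//         {OpType::CZ, OpType::Rz, OpType::SX}, CircPool::H_CZ_H(),
+//         CircPool::tk1_to_rzsx);
+//   }
+//
+// i.e. the multi-qubit and single-qubit allowed gates are passed together,
+// and leftover TK1 gates are converted by the supplied tk1 replacement.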
Transform rebase_HQS() { return rebase_factory( - {OpType::ZZMax}, CircPool::CX_using_ZZMax(), - {OpType::PhasedX, OpType::Rz}, tk1_to_PhasedXRz); + {OpType::ZZMax, OpType::PhasedX, OpType::Rz}, CircPool::CX_using_ZZMax(), + CircPool::tk1_to_PhasedXRz); } Transform rebase_UMD() { return rebase_factory( - {OpType::XXPhase}, CircPool::CX_using_XXPhase_0(), - {OpType::PhasedX, OpType::Rz}, tk1_to_PhasedXRz); + {OpType::XXPhase, OpType::PhasedX, OpType::Rz}, + CircPool::CX_using_XXPhase_0(), CircPool::tk1_to_PhasedXRz); } Transform rebase_quil() { return rebase_factory( - {OpType::CZ}, CircPool::H_CZ_H(), {OpType::Rx, OpType::Rz}, tk1_to_rzrx); + {OpType::CZ, OpType::Rx, OpType::Rz}, CircPool::H_CZ_H(), + CircPool::tk1_to_rzrx); } Transform rebase_pyzx() { - OpTypeSet pyzx_multiqs = {OpType::SWAP, OpType::CX, OpType::CZ}; - OpTypeSet pyzx_singleqs = {OpType::H, OpType::X, OpType::Z, OpType::S, - OpType::T, OpType::Rx, OpType::Rz}; - return rebase_factory( - pyzx_multiqs, CircPool::CX(), pyzx_singleqs, tk1_to_rzrx); + OpTypeSet pyzx_gates = {OpType::SWAP, OpType::CX, OpType::CZ, OpType::H, + OpType::X, OpType::Z, OpType::S, OpType::T, + OpType::Rx, OpType::Rz}; + return rebase_factory(pyzx_gates, CircPool::CX(), CircPool::tk1_to_rzrx); } Transform rebase_projectq() { - OpTypeSet projectq_multiqs = { - OpType::SWAP, OpType::CRz, OpType::CX, OpType::CZ}; - OpTypeSet projectq_singleqs = {OpType::H, OpType::X, OpType::Y, OpType::Z, - OpType::S, OpType::T, OpType::V, OpType::Rx, - OpType::Ry, OpType::Rz}; - return rebase_factory( - projectq_multiqs, CircPool::CX(), projectq_singleqs, tk1_to_rzrx); + OpTypeSet projectq_gates = {OpType::SWAP, OpType::CRz, OpType::CX, OpType::CZ, + OpType::H, OpType::X, OpType::Y, OpType::Z, + OpType::S, OpType::T, OpType::V, OpType::Rx, + OpType::Ry, OpType::Rz}; + return rebase_factory(projectq_gates, CircPool::CX(), CircPool::tk1_to_rzrx); } Transform rebase_UFR() { return rebase_factory( - {OpType::CX}, CircPool::CX(), {OpType::Rz, OpType::H}, tk1_to_rzh); + {OpType::CX, OpType::Rz, OpType::H}, CircPool::CX(), + CircPool::tk1_to_rzh); } Transform rebase_OQC() { return rebase_factory( - {OpType::ECR}, CircPool::CX_using_ECR(), {OpType::Rz, OpType::SX}, - tk1_to_rzsx); + {OpType::ECR, OpType::Rz, OpType::SX}, CircPool::CX_using_ECR(), + CircPool::tk1_to_rzsx); } } // namespace Transforms diff --git a/tket/src/Transformations/include/Transformations/Rebase.hpp b/tket/src/Transformations/include/Transformations/Rebase.hpp index f5a185bfe7..ff0bca2095 100644 --- a/tket/src/Transformations/include/Transformations/Rebase.hpp +++ b/tket/src/Transformations/include/Transformations/Rebase.hpp @@ -23,10 +23,9 @@ namespace Transforms { // decomposes multiq gates not in the gate set to CXs, then replaces CXs with // the replacement (if CX is not allowed) then converts singleq gates no in // the gate set to U3 and replaces them using provided function Expects: any -// gates Produces: gates in multiqs and singleqs +// gates Produces: gates in allowed_gates Transform rebase_factory( - const OpTypeSet& multiqs, const Circuit& cx_replacement, - const OpTypeSet& singleqs, + const OpTypeSet& allowed_gates, const Circuit& cx_replacement, const std::function& tk1_replacement); @@ -67,18 +66,6 @@ Transform rebase_UFR(); // Singleqs: Rz, SX Transform rebase_OQC(); -// converts a TK1 gate to a PhasedXRz gate -Circuit tk1_to_PhasedXRz( - const Expr& alpha, const Expr& beta, const Expr& gamma); - -Circuit tk1_to_rzrx(const Expr& alpha, const Expr& beta, const Expr& gamma); - -Circuit 
tk1_to_rzh(const Expr& alpha, const Expr& beta, const Expr& gamma); - -Circuit tk1_to_rzsx(const Expr& alpha, const Expr& beta, const Expr& gamma); - -Circuit tk1_to_tk1(const Expr& alpha, const Expr& beta, const Expr& gamma); - } // namespace Transforms } // namespace tket diff --git a/tket/src/Transformations/include/Transformations/Transform.hpp b/tket/src/Transformations/include/Transformations/Transform.hpp index c0b39cd291..2905fe5ae0 100644 --- a/tket/src/Transformations/include/Transformations/Transform.hpp +++ b/tket/src/Transformations/include/Transformations/Transform.hpp @@ -15,32 +15,77 @@ #pragma once #include +#include #include "Circuit/Circuit.hpp" namespace tket { +/** + * A transformation of a circuit that preserves its semantics + */ class Transform { public: - typedef std::function Transformation; + /** + * A function that takes a circuit and (optionally) a relabelling of units. + * + * The relabelling, if present, maps the original unit IDs at the beginning + * and end of the circuit to new names, which may be in a different order at + * the beginning and end. + * + * The function returns false if no changes are made, otherwise true. + */ + typedef std::function)> + Transformation; + + /** + * A function that takes a circuit and does not rename any units. + * + * The function returns false if no changes are made, otherwise true. + */ + typedef std::function SimpleTransformation; + typedef std::function Metric; - // the actual transformation to be applied - // performs transformation in place and returns true iff made some change - Transformation apply; // this would ideally be `const`, but that deletes the - // copy assignment operator for Transform. + /** The transformation applied. */ + Transformation apply_fn; + + /** Construct from a transformation function */ + explicit Transform(const Transformation& trans) : apply_fn(trans) {} + + /** Construct from a transformation function that preserves unit IDs */ + explicit Transform(const SimpleTransformation& trans) + : apply_fn([=](Circuit& circ, std::shared_ptr) { + return trans(circ); + }) {} - explicit Transform(const Transformation& trans) : apply(trans) {} + /** + * Apply the transform to a circuit + * + * @param circ circuit to be transformed + * + * @return whether any changes were made + */ + bool apply(Circuit& circ) const { return apply_fn(circ, nullptr); } + /** + * Compose two transforms in sequence + * + * @param[in] lhs first transform + * @param[in] rhs second transform + * + * @return the composite transform + */ friend Transform operator>>(const Transform& lhs, const Transform& rhs); }; namespace Transforms { -// identity Transform (does nothing to Circuit) -inline const Transform id = Transform([](const Circuit&) { - return false; -}); // returns `false` as it does not change the Circuit in any way +/** + * Identity transform (does nothing, returns false) + */ +inline const Transform id = + Transform([](Circuit&, std::shared_ptr) { return false; }); } // namespace Transforms diff --git a/tket/src/Utils/AssertMessage.cpp b/tket/src/Utils/AssertMessage.cpp new file mode 100644 index 0000000000..4452dbaed7 --- /dev/null +++ b/tket/src/Utils/AssertMessage.cpp @@ -0,0 +1,43 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "AssertMessage.hpp" + +namespace tket { + +// GCOVR_EXCL_START +AssertMessage::AssertMessage() {} + +std::string& AssertMessage::get_error_message_ref() { + static std::string error_string; + return error_string; +} + +std::string AssertMessage::get_error_message() { + const std::string message = get_error_message_ref(); + // Asserts are SUPPOSED to lead to aborts, so clearing + // shouldn't be necessary; but anyway, in case it's + // called multiple times, clear ready for the next message. + get_error_message_ref().clear(); + return message; +} + +AssertMessage::operator bool() const { + // Store the built up error message. + get_error_message_ref() = m_ss.str(); + return false; +} +// GCOVR_EXCL_STOP + +} // namespace tket diff --git a/tket/src/Utils/CMakeLists.txt b/tket/src/Utils/CMakeLists.txt index 90c2c7e7ae..81da0d5f2c 100644 --- a/tket/src/Utils/CMakeLists.txt +++ b/tket/src/Utils/CMakeLists.txt @@ -21,9 +21,11 @@ endif() add_library(tket-${COMP} TketLog.cpp UnitID.cpp + AssertMessage.cpp HelperFunctions.cpp MatrixAnalysis.cpp PauliStrings.cpp + RNG.cpp CosSinDecomposition.cpp Expression.cpp) @@ -42,4 +44,6 @@ target_include_directories(tket-${COMP} ${TKET_${COMP}_INCLUDE_DIR} ${TKET_${COMP}_INCLUDE_DIR}/${COMP}) +target_link_libraries(tket-${COMP} PUBLIC + ${CONAN_LIBS_FMT} ${CONAN_LIBS_SPDLOG}) target_link_libraries(tket-${COMP} PRIVATE ${CONAN_LIBS_SYMENGINE}) diff --git a/tket/src/Utils/Expression.cpp b/tket/src/Utils/Expression.cpp index e24263b098..d81ca4617c 100644 --- a/tket/src/Utils/Expression.cpp +++ b/tket/src/Utils/Expression.cpp @@ -14,8 +14,11 @@ #include "Expression.hpp" +#include + #include "Constants.hpp" #include "Symbols.hpp" +#include "symengine/symengine_exception.h" namespace tket { @@ -57,7 +60,11 @@ std::optional eval_expr(const Expr& e) { if (!SymEngine::free_symbols(e).empty()) { return std::nullopt; } else { - return SymEngine::eval_double(e); + try { + return SymEngine::eval_double(e); + } catch (SymEngine::NotImplementedError&) { + return std::nullopt; + } } } diff --git a/tket/src/Utils/PauliStrings.cpp b/tket/src/Utils/PauliStrings.cpp index 4c2a4e0b04..9583800703 100644 --- a/tket/src/Utils/PauliStrings.cpp +++ b/tket/src/Utils/PauliStrings.cpp @@ -387,6 +387,7 @@ std::size_t hash_value(const QubitPauliString &qps) { } void to_json(nlohmann::json &j, const QubitPauliString &paulistr) { + j = nlohmann::json::array(); for (const auto &[qb, pauli] : paulistr.map) { j.push_back({qb, pauli}); } diff --git a/tket/tests/Graphs/RNG.cpp b/tket/src/Utils/RNG.cpp similarity index 97% rename from tket/tests/Graphs/RNG.cpp rename to tket/src/Utils/RNG.cpp index 1ee6fceff5..506d6237db 100644 --- a/tket/tests/Graphs/RNG.cpp +++ b/tket/src/Utils/RNG.cpp @@ -12,14 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "RNG.hpp" +#include "Utils/RNG.hpp" -using std::size_t; using std::vector; namespace tket { -namespace graphs { -namespace tests { size_t RNG::get_size_t(size_t max_value) { if (max_value == 0) { @@ -162,6 +159,4 @@ bool RNG::check_percentage(size_t percentage) { return get_size_t(99) < percentage; } -} // namespace tests -} // namespace graphs } // namespace tket diff --git a/tket/src/Utils/include/Utils/Assert.hpp b/tket/src/Utils/include/Utils/Assert.hpp index a2335deab5..ddc1342778 100644 --- a/tket/src/Utils/include/Utils/Assert.hpp +++ b/tket/src/Utils/include/Utils/Assert.hpp @@ -15,20 +15,90 @@ #pragma once #include -#include +#include "AssertMessage.hpp" #include "TketLog.hpp" /** - * If the condition `b` is not satisfied, log a diagnostic message and abort. + * If `condition` is not satisfied, log a diagnostic message and abort. + * You can abort with a fixed string: + * + * TKET_ASSERT(!"Some error message..."); + * + * For a simple statement like: + * + * TKET_ASSERT(xcritical(msg.str()); \ - std::abort(); \ - } \ +#define TKET_ASSERT(condition) \ + do { \ + try { \ + if (!(condition)) { \ + std::stringstream ss; \ + ss << "Assertion '" << #condition << "' (" << __FILE__ << " : " \ + << __func__ << " : " << __LINE__ << ") failed. " \ + << AssertMessage::get_error_message() << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ + } catch (const std::exception& ex) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") threw unexpected exception: '" << ex.what() << "'. " \ + << AssertMessage::get_error_message() << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } catch (...) { \ + std::stringstream ss; \ + ss << "Evaluating assertion condition '" << #condition << "' (" \ + << __FILE__ << " : " << __func__ << " : " << __LINE__ \ + << ") Threw unknown exception. " \ + << AssertMessage::get_error_message() << " Aborting."; \ + tket::tket_log()->critical(ss.str()); \ + std::abort(); \ + } \ } while (0) diff --git a/tket/src/Utils/include/Utils/AssertMessage.hpp b/tket/src/Utils/include/Utils/AssertMessage.hpp new file mode 100644 index 0000000000..98b023eb57 --- /dev/null +++ b/tket/src/Utils/include/Utils/AssertMessage.hpp @@ -0,0 +1,59 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +namespace tket { + +// GCOVR_EXCL_START +/** This is only for use with TKET_ASSERT. + */ +class AssertMessage { + public: + /** Construct the object, to begin writing to the stream. */ + AssertMessage(); + + /** Always returns false, so that "|| AssertMessage() << a)" becomes + * "|| false)". + * Also, stores the error message for later use by TKET_ASSERT macros; + * previously this information was passed on by exceptions, but that + * generated lots of code coverage branching problems. 
*/ + operator bool() const; + + /** Every streamable object x can be written to the stream. + * @param x Any object which can be written to a stringstream. + * @return This object, to allow chaining. + */ + template + AssertMessage& operator<<(const T& x) { + m_ss << x; + return *this; + } + + /** Get the stored error message. Of course, if AssertMessage() + * has not actually been called, just returns an empty string. + * Also, clears the stored message, ready for the next time. + */ + static std::string get_error_message(); + + private: + std::stringstream m_ss; + + static std::string& get_error_message_ref(); +}; +// GCOVR_EXCL_STOP + +} // namespace tket diff --git a/tket/tests/Graphs/RNG.hpp b/tket/src/Utils/include/Utils/RNG.hpp similarity index 63% rename from tket/tests/Graphs/RNG.hpp rename to tket/src/Utils/include/Utils/RNG.hpp index c7b6707731..7b3c17acec 100644 --- a/tket/tests/Graphs/RNG.hpp +++ b/tket/src/Utils/include/Utils/RNG.hpp @@ -20,8 +20,6 @@ #include namespace tket { -namespace graphs { -namespace tests { // Something like this is needed for proper random test data generation // if you want to be platform-independent, as the C++ standard is stupid. @@ -40,16 +38,15 @@ namespace tests { // even for something as simple as a uniform distribution. // The same applies to, e.g., std::random_shuffle. -/** - * TODO: move this to a better place, once decided where. - * A random number generator class. +/** A random number generator class. * Of course, this is only for random test data generation, * definitely NOT suitable for any kind of cryptography! * Note that there are no functions involving doubles anywhere! * Actually, double calculations can give very slightly different answers * across platforms, compilers, compiler optimisation settings; * the numerical difference is absolutely negligible, - * but it's worth being ultra cautious! + * but it's worth being ultra cautious! (And it's much easier for testing + * to get IDENTICAL results across platforms). */ class RNG { public: @@ -64,36 +61,46 @@ class RNG { * which can be returned. * @return A size_t from the inclusive range {0,1,2,...,N}. */ - std::size_t get_size_t(std::size_t max_value); + size_t get_size_t(size_t max_value); /** * Returns a number in the inclusive interval, including the endpoints. - * + * @param min_value The smallest value (inclusive) that can be returned. + * @param max_value The largest value (inclusive) that can be returned. * @return A size_t from the inclusive range {a, a+1, a+2, ... , b}. */ - std::size_t get_size_t(std::size_t min_value, std::size_t max_value); + size_t get_size_t(size_t min_value, size_t max_value); /** - * I believe that the behaviour on the Mersenne twister random engine - * is guaranteed by the C++ standard, although I'm not 100% sure. - * The standard specifies 5489u as the default initial seed, so it would - * be rather pointless to do that if the bits generated - * were still implementation-dependent. + * The behaviour of the RAW BITS of the Mersenne twister random engine + * is guaranteed by the C++ standard. + * The standard specifies 5489u as the default initial seed. + * @param seed A seed value, to alter the RNG state. + * By default, uses the value specified by the standard. */ - void set_seed(std::size_t seed); + void set_seed(size_t seed = 5489); /** Return true p% of the time. * (Very quick and dirty, doesn't check for, e.g., 110% effort...) * As mentioned above, we deliberately DON'T have a function returning * a uniform double. 
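+ *
+ * Typical test usage (illustrative sketch; the seed value is arbitrary):
+ *
+ *   RNG rng;
+ *   rng.set_seed(12345);
+ *   std::vector<size_t> numbers = rng.get_permutation(10);
+ *   rng.do_shuffle(numbers);
+ *
+ * which produces the same sequence of results on every platform.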
Sticking to integer values is safest. + * @param percentage The probability of returning true, expressed as + * a percentage. + * @return A random bool, returns true with specified probability. */ - bool check_percentage(std::size_t percentage); + bool check_percentage(size_t percentage); /** * Simply shuffle the elements around at random. - * Approximately uniform over all possible permutations. + * Approximately uniform "in practice" over all possible permutations. + * (Although of course, strictly speaking very far from uniform for larger + * vectors. The number of possible permutations grows very rapidly + * and quickly becomes larger than the total number of distinct states + * any fixed engine can take, no matter which is used. Thus, for larger + * vectors, only a small proportion of permutations are actually possible). * This is necessary because C++ random_shuffle is * implementation-dependent (see above comments). + * @param elements The vector to be shuffled randomly. */ template void do_shuffle(std::vector& elements) { @@ -101,27 +108,33 @@ class RNG { return; } m_shuffling_data.resize(elements.size()); - for (std::size_t i = 0; i < m_shuffling_data.size(); ++i) { + for (size_t i = 0; i < m_shuffling_data.size(); ++i) { m_shuffling_data[i].first = m_engine(); + // Tricky subtle point: without this extra entry to break ties, + // std::sort could give DIFFERENT results across platforms and compilers, + // if the object T allows unequal elements comparing equal. m_shuffling_data[i].second = i; } std::sort( m_shuffling_data.begin(), m_shuffling_data.end(), - [](const std::pair& lhs, - const std::pair& rhs) { + [](const std::pair& lhs, + const std::pair& rhs) { return lhs.first < rhs.first || (lhs.first == rhs.first && lhs.second < rhs.second); }); // Don't need to make a copy of "elements"! Just do repeated swaps... - for (std::size_t i = 0; i < m_shuffling_data.size(); ++i) { - const std::size_t& j = m_shuffling_data[i].second; + for (size_t i = 0; i < m_shuffling_data.size(); ++i) { + const size_t& j = m_shuffling_data[i].second; if (i != j) { std::swap(elements[i], elements[j]); } } } - /** Return a random element from the vector. */ + /** Return a random element from the vector. + * @param elements The vector to be sampled from. + * @return A reference to a random element, approximately uniform. + */ template const T& get_element(const std::vector& elements) { if (elements.empty()) { @@ -134,6 +147,10 @@ class RNG { * Pick out a random element from the vector, copy and return it, * but also remove that element from the vector (swapping with * the back for efficiency, i.e. the ordering changes). + * @param elements The vector to be sampled from. + * Decreases size by one each time. + * Time O(1) because the ordering is allowed to change. + * @return A copy of the removed element. */ template T get_and_remove_element(std::vector& elements) { @@ -141,23 +158,25 @@ class RNG { throw std::runtime_error( "RNG: get_and_remove_element called on empty vector"); } - std::size_t index = get_size_t(elements.size() - 1); + size_t index = get_size_t(elements.size() - 1); const T copy = elements[index]; elements[index] = elements.back(); elements.pop_back(); return copy; } - /** Returns the numbers {0,1,2,...,N-1} in some random order. */ - std::vector get_permutation(std::size_t size); + /** Returns the numbers {0,1,2,...,N-1} in some random order. + * @param size The size of the returned vector. + * @return An interval of nonnegative numbers, starting at zero, + * but rearranged randomly. 
+ */ + std::vector get_permutation(size_t size); private: std::mt19937_64 m_engine; // Avoids repeated memory reallocation. - std::vector> m_shuffling_data; + std::vector> m_shuffling_data; }; -} // namespace tests -} // namespace graphs } // namespace tket diff --git a/tket/src/Utils/include/Utils/SequencedContainers.hpp b/tket/src/Utils/include/Utils/SequencedContainers.hpp index 843fb7453f..3a33fc174c 100644 --- a/tket/src/Utils/include/Utils/SequencedContainers.hpp +++ b/tket/src/Utils/include/Utils/SequencedContainers.hpp @@ -23,7 +23,23 @@ namespace tket { struct TagKey {}; +struct TagValue {}; struct TagSeq {}; + +template +using sequenced_bimap_t = boost::multi_index::multi_index_container< + std::pair, + boost::multi_index::indexed_by< + boost::multi_index::ordered_unique< + boost::multi_index::tag, + boost::multi_index::member< + std::pair, A, &std::pair::first>>, + boost::multi_index::ordered_unique< + boost::multi_index::tag, + boost::multi_index::member< + std::pair, B, &std::pair::second>>, + boost::multi_index::sequenced>>>; + template using sequenced_map_t = boost::multi_index::multi_index_container< std::pair, diff --git a/tket/src/Utils/include/Utils/UnitID.hpp b/tket/src/Utils/include/Utils/UnitID.hpp index dd4cc1f267..fd60c67452 100644 --- a/tket/src/Utils/include/Utils/UnitID.hpp +++ b/tket/src/Utils/include/Utils/UnitID.hpp @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -89,6 +90,7 @@ class UnitID { if (n < 0) return true; return data_->index_ < other.data_->index_; } + bool operator>(const UnitID &other) const { return other < *this; } bool operator==(const UnitID &other) const { return (this->data_->name_ == other.data_->name_) && (this->data_->index_ == other.data_->index_); @@ -248,9 +250,15 @@ class Node : public Qubit { JSON_DECL(Node) -/** A correspondence between two sets of node IDs */ +/** A correspondence between two sets of unit IDs */ typedef boost::bimap unit_bimap_t; +/** A pair of ("initial" and "final") correspondences between unit IDs */ +typedef struct { + unit_bimap_t initial; + unit_bimap_t final; +} unit_bimaps_t; + typedef std::vector unit_vector_t; typedef std::map unit_map_t; typedef std::set unit_set_t; @@ -268,4 +276,56 @@ typedef std::vector node_vector_t; /** A register of locations sharing the same name */ typedef std::map register_t; +template +static bool update_map(unit_bimap_t &m, const std::map &um) { + unit_map_t new_m; + bool changed = false; + for (const std::pair &pair : um) { + const auto &it = m.right.find(pair.first); + if (it == m.right.end()) { + continue; + } + new_m.insert({it->second, pair.second}); + changed |= (m.right.erase(pair.first) > 0); + } + for (const std::pair &pair : new_m) { + changed |= m.left.insert(pair).second; + } + return changed; +} + +/** + * Update a pair of "initial" and "final" correspondences. + * + * If \p maps is null then the function does nothing and returns false. 
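+ *
+ * For example, with UnitA = Qubit and UnitB = Node (Node derives from Qubit,
+ * so the static_asserts below are satisfied), a transform that relabels
+ * Qubits to Nodes could call
+ * update_maps(maps, qubit_to_node_initial, qubit_to_node_final), where both
+ * arguments are of type std::map<Qubit, Node> (hypothetical variable names).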
+ * + * @param[in,out] maps maps to be updated + * @param[in] um_initial new correspondences added to initial map + * @param[in] um_final new correspondences added to final map + * + * @tparam UnitA first unit type + * @tparam UnitB second unit type + * + * @return whether any changes were made to the maps + */ +template +bool update_maps( + std::shared_ptr maps, + const std::map &um_initial, + const std::map &um_final) { + if (!maps) return false; + // Can only work for Unit classes + static_assert(std::is_base_of::value); + static_assert(std::is_base_of::value); + // Unit types must be related, so cannot rename e.g. Bits to Qubits + static_assert( + std::is_base_of::value || + std::is_base_of::value); + + bool changed = false; + changed |= update_map(maps->initial, um_initial); + changed |= update_map(maps->final, um_final); + return changed; +} + } // namespace tket diff --git a/tket/src/ZX/CMakeLists.txt b/tket/src/ZX/CMakeLists.txt index 8021d15192..98d166404a 100644 --- a/tket/src/ZX/CMakeLists.txt +++ b/tket/src/ZX/CMakeLists.txt @@ -21,6 +21,7 @@ endif() add_library(tket-${COMP} ZXDConstructors.cpp ZXDExpansions.cpp + ZXDFormats.cpp ZXDGettersSetters.cpp ZXDManipulation.cpp ZXGenerator.cpp @@ -28,7 +29,8 @@ add_library(tket-${COMP} ZXRWAxioms.cpp ZXRWDecompositions.cpp ZXRWGraphLikeForm.cpp - ZXRWGraphLikeSimplification.cpp) + ZXRWGraphLikeSimplification.cpp + Flow.cpp) list(APPEND DEPS_${COMP} Utils) diff --git a/tket/src/ZX/Flow.cpp b/tket/src/ZX/Flow.cpp new file mode 100644 index 0000000000..e5561df993 --- /dev/null +++ b/tket/src/ZX/Flow.cpp @@ -0,0 +1,611 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
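+
+// Implements identification (causal and Pauli flow), verification and
+// focussing of flows for ZX diagrams in MBQC form.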
+ +#include "ZX/Flow.hpp" + +#include "Utils/GraphHeaders.hpp" +#include "Utils/MatrixAnalysis.hpp" + +namespace tket { + +namespace zx { + +Flow::Flow( + const std::map& c, + const std::map& d) + : c_(c), d_(d) {} + +ZXVertSeqSet Flow::c(const ZXVert& v) const { return c_.at(v); } + +ZXVertSeqSet Flow::odd(const ZXVert& v, const ZXDiagram& diag) const { + sequenced_map_t parities; + ZXVertSeqSet cv = c(v); + for (const ZXVert& u : cv.get()) { + for (const ZXVert& n : diag.neighbours(u)) { + sequenced_map_t::iterator found = + parities.get().find(n); + if (found == parities.get().end()) { + parities.insert({n, 1}); + } else { + parities.replace(found, {n, found->second + 1}); + } + } + } + ZXVertSeqSet odds; + for (const std::pair& p : parities.get()) { + if (p.second % 2 == 1) { + odds.insert(p.first); + } + } + return odds; +} + +unsigned Flow::d(const ZXVert& v) const { return d_.at(v); } + +void Flow::verify(const ZXDiagram& diag) const { + if (!diag.is_MBQC()) + throw ZXError("Verifying a flow for a diagram that is not in MBQC form"); + BGL_FORALL_VERTICES(u, *diag.graph, ZXGraph) { + ZXType type = diag.get_zxtype(u); + if (is_boundary_type(type)) continue; + ZXVertSeqSet uc = c(u); + ZXVertSeqSet uodd = odd(u, diag); + for (const ZXVert& v : uc.get()) { + ZXType vt = diag.get_zxtype(v); + if (u != v && vt != ZXType::PX && vt != ZXType::PY && d(u) <= d(v)) + throw ZXError("A qubit has an X correction in its past"); + if (u != v && vt == ZXType::PY && d(u) <= d(v) && + uodd.find(v) == uodd.end()) + throw ZXError("A past Y vertex receives an X correction"); + } + for (const ZXVert& v : uodd.get()) { + ZXType vt = diag.get_zxtype(v); + if (u != v && vt != ZXType::PY && vt != ZXType::PZ && d(u) <= d(v)) + throw ZXError("A qubit has a Z correction in its past"); + if (u != v && vt == ZXType::PY && d(u) <= d(v) && uc.find(v) == uc.end()) + throw ZXError("A past Y vertex receives a Z correction"); + } + bool self_x = (uc.find(u) != uc.end()); + bool self_z = (uodd.find(u) != uodd.end()); + switch (type) { + case ZXType::XY: { + if (self_x || !self_z) + throw ZXError("XY vertex must be corrected with a Z"); + break; + } + case ZXType::XZ: { + if (!self_x || !self_z) + throw ZXError("XZ vertex must be corrected with a Y"); + break; + } + case ZXType::YZ: { + if (!self_x || self_z) + throw ZXError("YZ vertex must be corrected with an X"); + break; + } + case ZXType::PX: { + if (!self_z) throw ZXError("PX vertex must be corrected with a Y or Z"); + break; + } + case ZXType::PY: { + if (self_x == self_z) + throw ZXError("PY vertex must be corrected with an X or Z"); + break; + } + case ZXType::PZ: { + if (!self_x) + throw ZXError("PZ vertex must be corrected with an X or Y"); + break; + } + default: + throw ZXError("Invalid ZXType for MBQC diagram"); + } + } +} + +void Flow::focus(const ZXDiagram& diag) { + std::map order; + for (const std::pair& p : d_) { + auto found = order.find(p.second); + if (found == order.end()) + order.insert({p.second, {p.first}}); + else + found->second.push_back(p.first); + } + + for (const std::pair& p : order) { + for (const ZXVert& u : p.second) { + if (diag.get_zxtype(u) == ZXType::Output) continue; + ZXVertSeqSet uc = c(u); + ZXVertSeqSet uodd = odd(u, diag); + sequenced_map_t parities; + for (const ZXVert& v : uc.get()) parities.insert({v, 1}); + for (const ZXVert& v : uc.get()) { + if (v == u) continue; + ZXType vtype = diag.get_zxtype(v); + if ((vtype != ZXType::Output && vtype != ZXType::XY && + vtype != ZXType::PX && vtype != ZXType::PY) || + (vtype == 
ZXType::PY && uodd.find(v) == uodd.end())) { + ZXVertSeqSet cv = c(v); + for (const ZXVert& w : cv.get()) { + auto found = parities.get().find(w); + if (found == parities.get().end()) + parities.insert({w, 1}); + else + parities.replace(found, {w, found->second + 1}); + } + } + } + for (const ZXVert& v : uodd.get()) { + if (v == u) continue; + ZXType vtype = diag.get_zxtype(v); + if ((vtype != ZXType::Output && vtype != ZXType::XZ && + vtype != ZXType::YZ && vtype != ZXType::PY && + vtype != ZXType::PZ) || + (vtype == ZXType::PY && uc.find(v) == uc.end())) { + ZXVertSeqSet cv = c(v); + for (const ZXVert& w : cv.get()) { + auto found = parities.get().find(w); + if (found == parities.get().end()) + parities.insert({w, 1}); + else + parities.replace(found, {w, found->second + 1}); + } + } + } + ZXVertSeqSet new_c; + for (const std::pair p : parities.get()) { + if (p.second % 2 == 1) new_c.insert(p.first); + } + c_.at(u) = new_c; + } + } +} + +Flow Flow::identify_causal_flow(const ZXDiagram& diag) { + // Check diagram has the expected form for causal flow + if (!diag.is_MBQC()) + throw ZXError("ZXDiagram must be in MBQC form to identify causal flow"); + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + ZXType vtype = diag.get_zxtype(v); + if (!is_boundary_type(vtype) && vtype != ZXType::XY) + throw ZXError("Causal flow is only defined when all vertices are XY"); + } + + // solved contains all vertices for which we have found corrections + ZXVertSeqSet solved; + // correctors are those vertices that have been solved but are not yet + // fl.c(u) for some u + ZXVertSeqSet correctors; + // past[v] is undefined if v is not yet solved + // past[v] is the number of neighbours of v that are still unsolved + // When past[v] drops to 1, we can correct the unsolved vertex using an X on + // v and Z on all of its other neighbours + std::map past; + Flow fl{{}, {}}; + + // Outputs are trivially solved + for (const ZXVert& o : diag.get_boundary(ZXType::Output)) { + // MBQC form of ZX Diagrams requires each output to have a unique Hadamard + // edge to another vertex + past[o] = 1; + solved.insert(o); + fl.c_.insert({o, {}}); + fl.d_.insert({o, 0}); + // All outputs have been extended so are either non-inputs or disconnected + // from any other vertices, so safe to add to correctors + correctors.insert(o); + } + + unsigned depth = 1; + + do { + ZXVertSeqSet new_correctors; + for (const ZXVert& v : correctors.get()) { + // Determine whether |N(v) cap unsolved| == 1 to find u + ZXVert u; + unsigned n_found = 0; + for (const ZXVert& vn : diag.neighbours(v)) { + if (solved.find(vn) == solved.end()) { + u = vn; + ++n_found; + } + } + if (n_found != 1) continue; + + // Can correct u by firing stabilizer of v + fl.c_.insert({u, {v}}); + fl.d_.insert({u, depth}); + solved.insert(u); + + // Determine any new correctors + n_found = 0; + bool in = false; + for (const ZXVert& un : diag.neighbours(u)) { + if (diag.get_zxtype(un) == ZXType::Input) { + in = true; + solved.insert(un); + continue; + } + if (solved.find(un) == solved.end()) { + ++n_found; + } + // Another neighbour of un has been solved, so check if it can now + // correct something + auto it = past.find(un); + if (it != past.end() && it->second > 0) { + --it->second; + if (it->second == 1) new_correctors.insert(un); + } + } + // u is a new corrector if u notin I and |N(u) cap unsolved| == 1 + if (!in) { + past.insert({u, n_found}); + if (n_found == 1) new_correctors.insert(u); + } + } + correctors = new_correctors; + ++depth; + } while (!correctors.empty()); 
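+
+ // Every vertex must have been solved (inputs are marked as solved above,
+ // once a neighbouring vertex is solved) for the diagram to admit a causal
+ // flow.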
+ if (solved.size() != diag.n_vertices()) + throw ZXError("ZXDiagram does not have causal flow"); + return fl; +} + +std::map Flow::gauss_solve_correctors( + const ZXDiagram& diag, const boost::bimap& correctors, + const boost::bimap& preserve, const ZXVertVec& to_solve, + const boost::bimap& ys) { + unsigned n_correctors = correctors.size(); + unsigned n_preserve = preserve.size(); + unsigned n_to_solve = to_solve.size(); + unsigned n_ys = ys.size(); + MatrixXb mat = MatrixXb::Zero(n_preserve + n_ys, n_correctors + n_to_solve); + // Build adjacency matrix + for (boost::bimap::const_iterator it = correctors.begin(), + end = correctors.end(); + it != end; ++it) { + for (const ZXVert& n : diag.neighbours(it->left)) { + auto in_past = preserve.left.find(n); + if (in_past != preserve.left.end()) { + mat(in_past->second, it->right) = true; + } else { + auto in_ys = ys.left.find(n); + if (in_ys != ys.left.end()) { + mat(n_preserve + in_ys->second, it->right) = true; + } + } + } + } + for (boost::bimap::const_iterator it = ys.begin(), + end = ys.end(); + it != end; ++it) { + auto found = correctors.left.find(it->left); + if (found != correctors.left.end()) + mat(n_preserve + it->right, found->second) = true; + } + // Add rhs + for (unsigned i = 0; i < n_to_solve; ++i) { + ZXVert v = to_solve.at(i); + switch (diag.get_zxtype(v)) { + case ZXType::XY: + case ZXType::PX: { + mat(preserve.left.at(v), n_correctors + i) = true; + break; + } + case ZXType::XZ: { + mat(preserve.left.at(v), n_correctors + i) = true; + } + // fall through + case ZXType::YZ: + case ZXType::PZ: { + for (const ZXVert& n : diag.neighbours(v)) { + auto found = preserve.left.find(n); + if (found != preserve.left.end()) + mat(found->second, n_correctors + i) = true; + else { + found = ys.left.find(n); + if (found != ys.left.end()) + mat(n_preserve + found->second, n_correctors + i) = true; + } + } + break; + } + case ZXType::PY: { + mat(n_preserve + ys.left.at(v), n_correctors + i) = true; + break; + } + default: { + throw ZXError( + "Internal error in flow identification: non-MBQC vertex found"); + } + } + } + + // Gaussian elimination + std::vector> row_ops = + gaussian_elimination_row_ops( + mat.block(0, 0, n_preserve + n_ys, n_correctors)); + for (const std::pair& op : row_ops) { + for (unsigned j = 0; j < n_correctors + n_to_solve; ++j) { + mat(op.second, j) ^= mat(op.first, j); + } + } + + // Back substitution + // For each row i, pick a corrector j for which mat(i,j) == true, else + // determine that row i has zero lhs + std::map row_corrector; + for (unsigned i = 0; i < n_preserve + n_ys; ++i) { + for (unsigned j = 0; j < n_correctors; ++j) { + if (mat(i, j)) { + row_corrector.insert({i, correctors.right.at(j)}); + break; + } + } + } + // For each past i, scan down column of rhs and for each mat(j,CI+i) == true, + // add corrector from row j or try next i if row j has zero lhs + std::map solved_flow; + for (unsigned i = 0; i < n_to_solve; ++i) { + bool fail = false; + ZXVertSeqSet c_i; + for (unsigned j = 0; j < n_preserve + n_ys; ++j) { + if (mat(j, n_correctors + i)) { + auto found = row_corrector.find(j); + if (found == row_corrector.end()) { + fail = true; + break; + } else { + c_i.insert(found->second); + } + } + } + if (!fail) { + ZXVert v = to_solve.at(i); + ZXType vt = diag.get_zxtype(v); + if (vt == ZXType::XZ || vt == ZXType::YZ || vt == ZXType::PZ) + c_i.insert(v); + solved_flow.insert({v, c_i}); + } + } + return solved_flow; +} + +Flow Flow::identify_pauli_flow(const ZXDiagram& diag) { + // Check diagram has 
the expected form for pauli flow + if (!diag.is_MBQC()) + throw ZXError("ZXDiagram must be in MBQC form to identify Pauli flow"); + + ZXVertSeqSet solved; + std::set inputs; + Flow fl{{}, {}}; + + // Tag input measurements + for (const ZXVert& i : diag.get_boundary(ZXType::Input)) { + ZXVert ni = diag.neighbours(i).at(0); + inputs.insert(ni); + ZXType nt = diag.get_zxtype(ni); + if (nt == ZXType::XZ || nt == ZXType::YZ || nt == ZXType::PY) + throw ZXError( + "Inputs measured in XZ, YZ, or Y cannot be corrected with Pauli " + "flow"); + } + + // Indexing of correctors in binary matrix can be preserved between rounds as + // we will only ever add new correctors + boost::bimap correctors; + unsigned corrector_i = 0; + + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + switch (diag.get_zxtype(v)) { + case ZXType::Output: { + // Outputs are trivially solved + solved.insert(v); + fl.c_.insert({v, {}}); + fl.d_.insert({v, 0}); + // Cannot use inputs to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, corrector_i}); + ++corrector_i; + } + break; + } + case ZXType::PX: + case ZXType::PY: { + // Can use non-input Xs and Ys to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, corrector_i}); + ++corrector_i; + } + break; + } + default: + break; + } + } + + unsigned depth = 1; + + unsigned n_solved = 0; + do { + // Construct Gaussian elimination problem + boost::bimap preserve; + boost::bimap unsolved_ys; + ZXVertVec to_solve; + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + ZXType type = diag.get_zxtype(v); + if (solved.get().find(v) == solved.get().end() && + type != ZXType::Input) { + to_solve.push_back(v); + if (type == ZXType::PY) + unsolved_ys.insert({v, (unsigned)unsolved_ys.size()}); + else if (type != ZXType::PZ) + preserve.insert({v, (unsigned)preserve.size()}); + } + } + + std::map new_corrections = gauss_solve_correctors( + diag, correctors, preserve, to_solve, unsolved_ys); + + n_solved = new_corrections.size(); + + for (const std::pair& nc : new_corrections) { + fl.c_.insert(nc); + fl.d_.insert({nc.first, depth}); + solved.insert(nc.first); + if (inputs.find(nc.first) == inputs.end()) + correctors.insert({nc.first, (unsigned)correctors.size()}); + } + + ++depth; + } while (n_solved != 0); + + if (solved.size() + inputs.size() != diag.n_vertices()) + throw ZXError("ZXDiagram does not have pauli flow"); + + return fl; +} + +std::set Flow::identify_focussed_sets(const ZXDiagram& diag) { + // Check diagram has the expected form for pauli flow + if (!diag.is_MBQC()) + throw ZXError("ZXDiagram must be in MBQC form to identify gflow"); + + std::set inputs; + + // Tag input measurements + for (const ZXVert& i : diag.get_boundary(ZXType::Input)) { + ZXVert ni = diag.neighbours(i).at(0); + inputs.insert(ni); + ZXType nt = diag.get_zxtype(ni); + if (nt == ZXType::XZ || nt == ZXType::YZ || nt == ZXType::PY) + throw ZXError( + "Inputs measured in XZ, YZ, or Y cannot be corrected with Pauli " + "flow"); + } + + // Build Gaussian elimination problem + boost::bimap correctors; + boost::bimap preserve; + boost::bimap ys; + unsigned n_correctors = 0; + unsigned n_preserve = 0; + unsigned n_ys = 0; + + BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + switch (diag.get_zxtype(v)) { + case ZXType::Output: { + // Cannot use inputs to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, n_correctors}); + ++n_correctors; + } + break; + } + case ZXType::XY: + case ZXType::PX: { + preserve.insert({v, n_preserve}); + ++n_preserve; + // Can use 
non-input Xs and Ys to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, n_correctors}); + ++n_correctors; + } + break; + } + case ZXType::PY: { + ys.insert({v, n_ys}); + ++n_ys; + // Can use non-input Xs and Ys to correct + if (inputs.find(v) == inputs.end()) { + correctors.insert({v, n_correctors}); + ++n_correctors; + } + break; + } + default: + break; + } + } + + MatrixXb mat = MatrixXb::Zero(n_preserve + n_ys, n_correctors); + + // Build adjacency matrix + for (boost::bimap::const_iterator it = correctors.begin(), + end = correctors.end(); + it != end; ++it) { + for (const ZXVert& n : diag.neighbours(it->left)) { + auto in_preserve = preserve.left.find(n); + if (in_preserve != preserve.left.end()) { + mat(in_preserve->second, it->right) = true; + } else { + auto in_ys = ys.left.find(n); + if (in_ys != ys.left.end()) { + mat(n_preserve + in_ys->second, it->right) = true; + } + } + } + } + for (boost::bimap::const_iterator it = ys.begin(), + end = ys.end(); + it != end; ++it) { + auto found = correctors.left.find(it->left); + if (found != correctors.left.end()) + mat(n_preserve + it->right, found->second) = true; + } + + // Gaussian elimination + std::vector> row_ops = + gaussian_elimination_row_ops(mat); + for (const std::pair& op : row_ops) { + for (unsigned j = 0; j < n_correctors; ++j) { + mat(op.second, j) ^= mat(op.first, j); + } + } + + // Back substitution + // For each column j, it either a leading column (the first column for which + // mat(i,j) == true for a given i, so set row_corrector[i] = j; by Gaussian + // Elimination this is the only entry in the column) or it describes the + // focussed set generator {j} + {row_corrector[i] | mat(i,j) == true} + std::set focussed; + std::map row_corrector; + for (boost::bimap::const_iterator it = correctors.begin(), + end = correctors.end(); + it != end; ++it) { + ZXVertSeqSet fset{it->left}; + bool new_row_corrector = false; + for (unsigned i = 0; i < n_preserve + n_ys; ++i) { + if (mat(i, it->right)) { + auto inserted = row_corrector.insert({i, it->left}); + if (inserted.second) { + // New row_corrector, so move to next column + new_row_corrector = true; + break; + } else { + // Non-correcting column + fset.insert(inserted.first->second); + } + } + } + if (!new_row_corrector) focussed.insert({fset}); + } + + return focussed; +} + +} // namespace zx + +} // namespace tket diff --git a/tket/src/ZX/ZXDExpansions.cpp b/tket/src/ZX/ZXDExpansions.cpp index c9f8fa164f..20578fa8ba 100644 --- a/tket/src/ZX/ZXDExpansions.cpp +++ b/tket/src/ZX/ZXDExpansions.cpp @@ -62,19 +62,44 @@ ZXDiagram ZXDiagram::to_doubled_diagram() const { break; } case ZXType::ZSpider: - case ZXType::XSpider: { - const BasicGen& bg = static_cast(*op); - orig_op = std::make_shared( + case ZXType::XSpider: + case ZXType::XY: + case ZXType::YZ: { + const PhasedGen& bg = static_cast(*op); + orig_op = std::make_shared( op->get_type(), bg.get_param(), QuantumType::Classical); - conj_op = std::make_shared( + conj_op = std::make_shared( op->get_type(), -bg.get_param(), QuantumType::Classical); break; } + case ZXType::XZ: { + const PhasedGen& bg = static_cast(*op); + orig_op = std::make_shared( + op->get_type(), bg.get_param(), QuantumType::Classical); + conj_op = orig_op; + break; + } + case ZXType::PX: + case ZXType::PZ: { + const CliffordGen& bg = static_cast(*op); + orig_op = std::make_shared( + op->get_type(), bg.get_param(), QuantumType::Classical); + conj_op = orig_op; + break; + } + case ZXType::PY: { + const CliffordGen& bg = static_cast(*op); + 
orig_op = std::make_shared( + op->get_type(), bg.get_param(), QuantumType::Classical); + conj_op = std::make_shared( + op->get_type(), !bg.get_param(), QuantumType::Classical); + break; + } case ZXType::Hbox: { - const BasicGen& bg = static_cast(*op); - orig_op = std::make_shared( + const PhasedGen& bg = static_cast(*op); + orig_op = std::make_shared( op->get_type(), bg.get_param(), QuantumType::Classical); - conj_op = std::make_shared( + conj_op = std::make_shared( op->get_type(), SymEngine::conjugate(bg.get_param()), QuantumType::Classical); break; @@ -174,7 +199,7 @@ ZXDiagram ZXDiagram::to_quantum_embedding() const { if (embedding.get_qtype(b) == QuantumType::Classical) { ZXVert new_b = embedding.add_vertex(embedding.get_zxtype(b), QuantumType::Quantum); - ZXGen_ptr id = std::make_shared( + ZXGen_ptr id = std::make_shared( ZXType::ZSpider, 0., QuantumType::Classical); embedding.set_vertex_ZXGen_ptr(b, id); embedding.add_wire(new_b, b, ZXWireType::Basic, QuantumType::Quantum); diff --git a/tket/src/ZX/ZXDFormats.cpp b/tket/src/ZX/ZXDFormats.cpp new file mode 100644 index 0000000000..5ce8455cb4 --- /dev/null +++ b/tket/src/ZX/ZXDFormats.cpp @@ -0,0 +1,59 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
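+
+// Note: the two predicates defined below are used as preconditions elsewhere in
+// this diff. For example, the graph-like rewrites in
+// ZXRWGraphLikeSimplification.cpp start with
+//   if (!diag.is_graphlike()) return false;
+// and Flow::identify_causal_flow / Flow::identify_pauli_flow throw a ZXError
+// when is_MBQC() returns false.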
+ +#include "Utils/GraphHeaders.hpp" +#include "ZX/ZXDiagram.hpp" + +namespace tket { + +namespace zx { + +bool ZXDiagram::is_graphlike() const { + BGL_FORALL_EDGES(w, *graph, ZXGraph) { + if (is_boundary_type(get_zxtype(source(w))) || + is_boundary_type(get_zxtype(target(w)))) { + if (get_wire_type(w) != ZXWireType::Basic) return false; + } else { + if (get_wire_type(w) != ZXWireType::H) return false; + } + } + BGL_FORALL_VERTICES(v, *graph, ZXGraph) { + ZXType type = get_zxtype(v); + if (type != ZXType::ZSpider && !is_boundary_type(type)) return false; + } + return true; +} + +bool ZXDiagram::is_MBQC() const { + BGL_FORALL_EDGES(w, *graph, ZXGraph) { + if (get_qtype(w) != QuantumType::Quantum) return false; + if (get_zxtype(source(w)) == ZXType::Input || + get_zxtype(target(w)) == ZXType::Input) { + if (get_wire_type(w) != ZXWireType::Basic) return false; + } else { + if (get_wire_type(w) != ZXWireType::H) return false; + } + } + BGL_FORALL_VERTICES(v, *graph, ZXGraph) { + ZXType type = get_zxtype(v); + if (!is_MBQC_type(type) && type != ZXType::Input && type != ZXType::Output) + return false; + if (get_qtype(v) != QuantumType::Quantum) return false; + } + return true; +} + +} // namespace zx + +} // namespace tket diff --git a/tket/src/ZX/ZXDGettersSetters.cpp b/tket/src/ZX/ZXDGettersSetters.cpp index baa089fd2a..1356fa6c06 100644 --- a/tket/src/ZX/ZXDGettersSetters.cpp +++ b/tket/src/ZX/ZXDGettersSetters.cpp @@ -213,7 +213,7 @@ void ZXDiagram::set_wire_type(const Wire& w, ZXWireType type) { bool ZXDiagram::is_pauli_spider(const ZXVert& v) const { ZXGen_ptr op = get_vertex_ZXGen_ptr(v); if (!is_spider_type(op->get_type())) return false; - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); std::optional pi2_mult = equiv_Clifford(bg.get_param()); return (pi2_mult && ((*pi2_mult % 2) == 0)); } @@ -221,7 +221,7 @@ bool ZXDiagram::is_pauli_spider(const ZXVert& v) const { bool ZXDiagram::is_proper_clifford_spider(const ZXVert& v) const { ZXGen_ptr op = get_vertex_ZXGen_ptr(v); if (!is_spider_type(op->get_type())) return false; - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); std::optional pi2_mult = equiv_Clifford(bg.get_param()); return (pi2_mult && ((*pi2_mult % 2) == 1)); } @@ -249,7 +249,7 @@ static std::string graphviz_vertex_props(ZXGen_ptr op) { } case ZXType::ZSpider: case ZXType::XSpider: { - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); Expr p = bg.get_param(); std::string colour = (type == ZXType::ZSpider) ? 
"green" : "red"; ss << "fillcolor=\"" << colour << "\" shape=circle label=\""; @@ -258,7 +258,7 @@ static std::string graphviz_vertex_props(ZXGen_ptr op) { break; } case ZXType::Hbox: { - const BasicGen& bg = static_cast(*op); + const PhasedGen& bg = static_cast(*op); Expr p = bg.get_param(); std::optional ev = eval_expr_c(p); ss << "fillcolor=\"gold\" shape=square label=\""; @@ -266,6 +266,15 @@ static std::string graphviz_vertex_props(ZXGen_ptr op) { ss << "\""; break; } + case ZXType::XY: + case ZXType::XZ: + case ZXType::YZ: + case ZXType::PX: + case ZXType::PY: + case ZXType::PZ: { + ss << "shape=point label=\"" << op->get_name() << "\""; + break; + } case ZXType::Triangle: { ss << "fillcolor=\"gold\" shape=triangle"; break; diff --git a/tket/src/ZX/ZXGenerator.cpp b/tket/src/ZX/ZXGenerator.cpp index 9777f1df99..284bf52dd1 100644 --- a/tket/src/ZX/ZXGenerator.cpp +++ b/tket/src/ZX/ZXGenerator.cpp @@ -39,7 +39,8 @@ bool is_boundary_type(ZXType type) { bool is_basic_gen_type(ZXType type) { static const ZXTypeSet basics = { - ZXType::ZSpider, ZXType::XSpider, ZXType::Hbox}; + ZXType::ZSpider, ZXType::XSpider, ZXType::Hbox, ZXType::XY, ZXType::XZ, + ZXType::YZ, ZXType::PX, ZXType::PY, ZXType::PZ}; return find_in_set(type, basics); } @@ -53,6 +54,24 @@ bool is_directed_type(ZXType type) { return find_in_set(type, directed); } +bool is_MBQC_type(ZXType type) { + static const ZXTypeSet MBQC = {ZXType::XY, ZXType::XZ, ZXType::YZ, + ZXType::PX, ZXType::PY, ZXType::PZ}; + return find_in_set(type, MBQC); +} + +bool is_phase_type(ZXType type) { + static const ZXTypeSet phases = {ZXType::ZSpider, ZXType::XSpider, + ZXType::Hbox, ZXType::XY, + ZXType::XZ, ZXType::YZ}; + return find_in_set(type, phases); +} + +bool is_Clifford_gen_type(ZXType type) { + static const ZXTypeSet cliffords = {ZXType::PX, ZXType::PY, ZXType::PZ}; + return find_in_set(type, cliffords); +} + /** * ZXGen (Base class) implementation */ @@ -77,12 +96,21 @@ ZXGen_ptr ZXGen::create_gen(ZXType type, QuantumType qtype) { break; } case ZXType::ZSpider: - case ZXType::XSpider: { - op = std::make_shared(type, 0., qtype); + case ZXType::XSpider: + case ZXType::XY: + case ZXType::XZ: + case ZXType::YZ: { + op = std::make_shared(type, 0., qtype); break; } case ZXType::Hbox: { - op = std::make_shared(type, -1., qtype); + op = std::make_shared(type, -1., qtype); + break; + } + case ZXType::PX: + case ZXType::PY: + case ZXType::PZ: { + op = std::make_shared(type, false, qtype); break; } case ZXType::Triangle: { @@ -99,12 +127,29 @@ ZXGen_ptr ZXGen::create_gen(ZXType type, const Expr& param, QuantumType qtype) { ZXGen_ptr op; switch (type) { case ZXType::ZSpider: - case ZXType::XSpider: { - op = std::make_shared(type, param, qtype); + case ZXType::XSpider: + case ZXType::XY: + case ZXType::XZ: + case ZXType::YZ: + case ZXType::Hbox: { + op = std::make_shared(type, param, qtype); break; } - case ZXType::Hbox: { - op = std::make_shared(type, param, qtype); + default: + throw ZXError( + "Cannot instantiate a parameterised ZXGen of the required " + "type"); + } + return op; +} + +ZXGen_ptr ZXGen::create_gen(ZXType type, bool param, QuantumType qtype) { + ZXGen_ptr op; + switch (type) { + case ZXType::PX: + case ZXType::PY: + case ZXType::PZ: { + op = std::make_shared(type, param, qtype); break; } default: @@ -173,8 +218,8 @@ bool BoundaryGen::operator==(const ZXGen& other) const { * BasicGen implementation */ -BasicGen::BasicGen(ZXType type, const Expr& param, QuantumType qtype) - : ZXGen(type), qtype_(qtype), param_(param) { 
+BasicGen::BasicGen(ZXType type, QuantumType qtype) + : ZXGen(type), qtype_(qtype) { if (!is_basic_gen_type(type)) { throw ZXError("Unsupported ZXType for BasicGen"); } @@ -188,16 +233,32 @@ bool BasicGen::valid_edge( this->qtype_ == QuantumType::Classical); } -Expr BasicGen::get_param() const { return param_; } +bool BasicGen::operator==(const ZXGen& other) const { + if (!ZXGen::operator==(other)) return false; + const BasicGen& other_basic = static_cast(other); + return this->qtype_ == other_basic.qtype_; +} -SymSet BasicGen::free_symbols() const { return expr_free_symbols(param_); } +/** + * PhasedGen implementation + */ +PhasedGen::PhasedGen(ZXType type, const Expr& param, QuantumType qtype) + : BasicGen(type, qtype), param_(param) { + if (!is_phase_type(type)) { + throw ZXError("Unsupported ZXType for PhasedGen"); + } +} -ZXGen_ptr BasicGen::symbol_substitution( +Expr PhasedGen::get_param() const { return param_; } + +SymSet PhasedGen::free_symbols() const { return expr_free_symbols(param_); } + +ZXGen_ptr PhasedGen::symbol_substitution( const SymEngine::map_basic_basic& sub_map) const { - return std::make_shared(type_, param_.subs(sub_map), qtype_); + return std::make_shared(type_, param_.subs(sub_map), qtype_); } -std::string BasicGen::get_name(bool) const { +std::string PhasedGen::get_name(bool) const { std::stringstream st; if (qtype_ == QuantumType::Quantum) { st << "Q-"; @@ -214,18 +275,75 @@ std::string BasicGen::get_name(bool) const { case ZXType::Hbox: st << "H"; break; + case ZXType::XY: + st << "XY"; + break; + case ZXType::XZ: + st << "XZ"; + break; + case ZXType::YZ: + st << "YZ"; + break; default: - throw ZXError("BasicGen with invalid ZXType"); + throw ZXError("PhasedGen with invalid ZXType"); } st << "(" << param_ << ")"; return st.str(); } -bool BasicGen::operator==(const ZXGen& other) const { - if (!ZXGen::operator==(other)) return false; - const BasicGen& other_basic = static_cast(other); - return ( - this->qtype_ == other_basic.qtype_ && this->param_ == other_basic.param_); +bool PhasedGen::operator==(const ZXGen& other) const { + if (!BasicGen::operator==(other)) return false; + const PhasedGen& other_basic = static_cast(other); + return this->param_ == other_basic.param_; +} + +/** + * CliffordGen implementation + */ +CliffordGen::CliffordGen(ZXType type, bool param, QuantumType qtype) + : BasicGen(type, qtype), param_(param) { + if (!is_Clifford_gen_type(type)) { + throw ZXError("Unsupported ZXType for CliffordGen"); + } +} + +bool CliffordGen::get_param() const { return param_; } + +SymSet CliffordGen::free_symbols() const { return {}; } + +ZXGen_ptr CliffordGen::symbol_substitution( + const SymEngine::map_basic_basic&) const { + return ZXGen_ptr(); +} + +std::string CliffordGen::get_name(bool) const { + std::stringstream st; + if (qtype_ == QuantumType::Quantum) { + st << "Q-"; + } else { + st << "C-"; + } + switch (type_) { + case ZXType::PX: + st << "X"; + break; + case ZXType::PY: + st << "Y"; + break; + case ZXType::PZ: + st << "Z"; + break; + default: + throw ZXError("CliffordGen with invalid ZXType"); + } + st << "(" << param_ << ")"; + return st.str(); +} + +bool CliffordGen::operator==(const ZXGen& other) const { + if (!BasicGen::operator==(other)) return false; + const CliffordGen& other_basic = static_cast(other); + return this->param_ == other_basic.param_; } /** diff --git a/tket/src/ZX/ZXRWAxioms.cpp b/tket/src/ZX/ZXRWAxioms.cpp index e285ea06fa..6f92633b5e 100644 --- a/tket/src/ZX/ZXRWAxioms.cpp +++ b/tket/src/ZX/ZXRWAxioms.cpp @@ -14,6 +14,7 @@ 
#include "Utils/GraphHeaders.hpp" #include "ZX/Rewrite.hpp" +#include "ZXDiagramImpl.hpp" namespace tket { @@ -32,8 +33,8 @@ bool Rewrite::red_to_green_fun(ZXDiagram& diag) { : ZXWireType::H; } // Replace X spider with Z spider - const BasicGen& x = diag.get_vertex_ZXGen(v); - ZXGen_ptr z = std::make_shared( + const PhasedGen& x = diag.get_vertex_ZXGen(v); + ZXGen_ptr z = std::make_shared( ZXType::ZSpider, x.get_param(), *x.get_qtype()); diag.set_vertex_ZXGen_ptr(v, z); } @@ -44,7 +45,9 @@ Rewrite Rewrite::red_to_green() { return Rewrite(red_to_green_fun); } bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { bool success = false; + std::set bin; BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { + if (bin.contains(v)) continue; ZXType vtype = diag.get_zxtype(v); if (!is_spider_type(vtype)) continue; /** @@ -60,6 +63,7 @@ bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { adj_list.pop_front(); ZXWireType wtype = diag.get_wire_type(w); ZXVert u = diag.other_end(w, v); + if (bin.contains(u)) continue; ZXType utype = diag.get_zxtype(u); bool same_colour = vtype == utype; if (!is_spider_type(utype) || u == v || @@ -68,9 +72,9 @@ bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { // The spiders `u` and `v` can be fused together // We merge into `v` and remove `u` so that we can efficiently continue to // search the neighbours - const BasicGen& vspid = diag.get_vertex_ZXGen(v); - const BasicGen& uspid = diag.get_vertex_ZXGen(u); - ZXGen_ptr new_spid = std::make_shared( + const PhasedGen& vspid = diag.get_vertex_ZXGen(v); + const PhasedGen& uspid = diag.get_vertex_ZXGen(u); + ZXGen_ptr new_spid = std::make_shared( vtype, vspid.get_param() + uspid.get_param(), (vspid.get_qtype() == QuantumType::Classical || uspid.get_qtype() == QuantumType::Classical) @@ -102,10 +106,13 @@ bool Rewrite::spider_fusion_fun(ZXDiagram& diag) { adj_list.push_back(new_w); } // Remove `u` - diag.remove_vertex(u); + bin.insert(u); success = true; } } + for (ZXVert u : bin) { + diag.remove_vertex(u); + } return success; } @@ -139,8 +146,8 @@ bool Rewrite::self_loop_removal_fun(ZXDiagram& diag) { success = true; } if ((n_pis % 2) == 1) { - const BasicGen& spid = diag.get_vertex_ZXGen(v); - ZXGen_ptr new_spid = std::make_shared( + const PhasedGen& spid = diag.get_vertex_ZXGen(v); + ZXGen_ptr new_spid = std::make_shared( vtype, spid.get_param() + 1., vqtype); diag.set_vertex_ZXGen_ptr(v, new_spid); } diff --git a/tket/src/ZX/ZXRWDecompositions.cpp b/tket/src/ZX/ZXRWDecompositions.cpp index 0c8eb209a5..75cece0eb0 100644 --- a/tket/src/ZX/ZXRWDecompositions.cpp +++ b/tket/src/ZX/ZXRWDecompositions.cpp @@ -79,8 +79,8 @@ Rewrite Rewrite::decompose_boxes() { return Rewrite(decompose_boxes_fun); } bool Rewrite::basic_wires_fun(ZXDiagram& diag) { ZXGen_ptr qhad = - std::make_shared(ZXType::Hbox, -1, QuantumType::Quantum); - ZXGen_ptr chad = std::make_shared( + std::make_shared(ZXType::Hbox, -1, QuantumType::Quantum); + ZXGen_ptr chad = std::make_shared( ZXType::Hbox, -1, QuantumType::Classical); WireVec targets; BGL_FORALL_EDGES(w, *diag.graph, ZXGraph) { diff --git a/tket/src/ZX/ZXRWGraphLikeForm.cpp b/tket/src/ZX/ZXRWGraphLikeForm.cpp index f45ad34758..c849d08ddc 100644 --- a/tket/src/ZX/ZXRWGraphLikeForm.cpp +++ b/tket/src/ZX/ZXRWGraphLikeForm.cpp @@ -39,7 +39,7 @@ bool Rewrite::separate_boundaries_fun(ZXDiagram& diag) { } // New wires will inherit `w`'s `qtype` QuantumType wq = diag.get_qtype(w); - ZXGen_ptr id = std::make_shared(ZXType::ZSpider, 0., wq); + ZXGen_ptr id = std::make_shared(ZXType::ZSpider, 0., wq); ZXVert z_at_b = 
diag.add_vertex(id); diag.add_wire(b, z_at_b, ZXWireType::Basic, wq); ZXVert z_at_o = diag.add_vertex(id); diff --git a/tket/src/ZX/ZXRWGraphLikeSimplification.cpp b/tket/src/ZX/ZXRWGraphLikeSimplification.cpp index cb93ca8370..d9f180c593 100644 --- a/tket/src/ZX/ZXRWGraphLikeSimplification.cpp +++ b/tket/src/ZX/ZXRWGraphLikeSimplification.cpp @@ -36,6 +36,7 @@ static bool can_complement_neighbourhood( } bool Rewrite::remove_interior_cliffords_fun(ZXDiagram& diag) { + if (!diag.is_graphlike()) return false; bool success = false; ZXVertSeqSet candidates; BGL_FORALL_VERTICES(v, *diag.graph, ZXGraph) { candidates.insert(v); } @@ -45,7 +46,7 @@ bool Rewrite::remove_interior_cliffords_fun(ZXDiagram& diag) { ZXVert v = *it; view.erase(it); if (!diag.is_proper_clifford_spider(v)) continue; - const BasicGen& spid = diag.get_vertex_ZXGen(v); + const PhasedGen& spid = diag.get_vertex_ZXGen(v); QuantumType vqtype = *spid.get_qtype(); ZXVertVec neighbours = diag.neighbours(v); if (!can_complement_neighbourhood(diag, vqtype, neighbours)) continue; @@ -69,14 +70,14 @@ bool Rewrite::remove_interior_cliffords_fun(ZXDiagram& diag) { diag.add_wire(*xi, *yi, ZXWireType::H, vqtype); } } - const BasicGen& xi_op = diag.get_vertex_ZXGen(*xi); + const PhasedGen& xi_op = diag.get_vertex_ZXGen(*xi); // If `v` is Quantum, Classical neighbours will pick up both the +theta // and -theta phases, cancelling out if (vqtype == QuantumType::Quantum && *xi_op.get_qtype() == QuantumType::Classical) continue; // Update phase information - ZXGen_ptr xi_new_op = std::make_shared( + ZXGen_ptr xi_new_op = std::make_shared( ZXType::ZSpider, xi_op.get_param() - spid.get_param(), *xi_op.get_qtype()); diag.set_vertex_ZXGen_ptr(*xi, xi_new_op); @@ -96,8 +97,8 @@ Rewrite Rewrite::remove_interior_cliffords() { static void add_phase_to_vertices( ZXDiagram& diag, const ZXVertSeqSet& verts, const Expr& phase) { for (const ZXVert& v : verts) { - const BasicGen& old_spid = diag.get_vertex_ZXGen(v); - ZXGen_ptr new_spid = std::make_shared( + const PhasedGen& old_spid = diag.get_vertex_ZXGen(v); + ZXGen_ptr new_spid = std::make_shared( ZXType::ZSpider, old_spid.get_param() + phase, *old_spid.get_qtype()); diag.set_vertex_ZXGen_ptr(v, new_spid); } @@ -123,6 +124,7 @@ static void bipartite_complementation( } bool Rewrite::remove_interior_paulis_fun(ZXDiagram& diag) { + if (!diag.is_graphlike()) return false; bool success = false; ZXVertSeqSet candidates; // Need an indirect iterator as BGL_FORALL_VERTICES // breaks when removing the current vertex @@ -167,8 +169,8 @@ bool Rewrite::remove_interior_paulis_fun(ZXDiagram& diag) { } excl_u.erase(v); excl_v.erase(joint.begin(), joint.end()); - const BasicGen& v_spid = diag.get_vertex_ZXGen(v); - const BasicGen& u_spid = diag.get_vertex_ZXGen(u); + const PhasedGen& v_spid = diag.get_vertex_ZXGen(v); + const PhasedGen& u_spid = diag.get_vertex_ZXGen(u); add_phase_to_vertices( diag, joint, v_spid.get_param() + u_spid.get_param() + 1.); @@ -194,6 +196,7 @@ Rewrite Rewrite::remove_interior_paulis() { } bool Rewrite::extend_at_boundary_paulis_fun(ZXDiagram& diag) { + if (!diag.is_graphlike()) return false; bool success = false; for (const ZXVert& b : diag.get_boundary()) { // Valid ZX graph requires boundaries to have a unique neighbour @@ -220,7 +223,8 @@ bool Rewrite::extend_at_boundary_paulis_fun(ZXDiagram& diag) { // extend it ZXGen_ptr u_op = diag.get_vertex_ZXGen_ptr(u); QuantumType qtype = *u_op->get_qtype(); - ZXGen_ptr id = std::make_shared(ZXType::ZSpider, 0., qtype); + ZXGen_ptr id = + 
      std::make_shared<PhasedGen>(ZXType::ZSpider, 0., qtype);
   ZXVert z1 = diag.add_vertex(id);
   ZXVert z2 = diag.add_vertex(u_op);
   diag.add_wire(u, z1, ZXWireType::H, qtype);
diff --git a/tket/src/ZX/include/ZX/Flow.hpp b/tket/src/ZX/include/ZX/Flow.hpp
new file mode 100644
index 0000000000..d1baaf29eb
--- /dev/null
+++ b/tket/src/ZX/include/ZX/Flow.hpp
@@ -0,0 +1,98 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "Utils/BiMapHeaders.hpp"
+#include "ZX/ZXDiagram.hpp"
+
+namespace tket {
+
+namespace zx {
+
+/**
+ * Data structure for flow in qubit MBQC.
+ * Different classes of flow exist based on the types of measurements and
+ * correction sets accepted, but the contents of the flow are the same.
+ * Causal < XY gflow < 3Plane gflow < Pauli flow
+ *
+ * `c` defines the correction set for each measured vertex.
+ * `d` approximates the partial order by giving the depth of the measurement
+ * from the output, i.e. d(u) < d(v) => v is measured before u.
+ */
+class Flow {
+ public:
+  Flow(
+      const std::map<ZXVert, ZXVertSeqSet>& c,
+      const std::map<ZXVert, unsigned>& d);
+
+  // Returns the correction set for a given measured vertex (those vertices
+  // receiving an X correction)
+  // Will fail with a map.at error if v is not in the flow
+  ZXVertSeqSet c(const ZXVert& v) const;
+  // Returns the odd neighbourhood of the correction set for a given measured
+  // vertex (those vertices receiving a Z correction)
+  // Will fail with a map.at error if v is not in the flow
+  ZXVertSeqSet odd(const ZXVert& v, const ZXDiagram& diag) const;
+  // Returns the depth from the outputs in the ordering of the flow
+  // e.g. an output vertex will have depth 0, the last measured vertex has depth
+  // 1
+  unsigned d(const ZXVert& v) const;
+
+  // Verify that a flow is well-formed according to the Pauli flow conditions
+  // Throws a ZXError if any condition is violated
+  void verify(const ZXDiagram& diag) const;
+
+  // Focusses a flow according to Lemma B.5, Simmons "Relating Measurement
+  // Patterns to Circuits via Pauli Flow" https://arxiv.org/pdf/2109.05654.pdf
+  void focus(const ZXDiagram& diag);
+
+  // Attempts to identify a causal flow for a diagram
+  // Follows Algorithm 1 from Mhalla & Perdrix "Finding Optimal Flows
+  // Efficiently" https://arxiv.org/pdf/0709.2670.pdf O(n^2 log n) for n
+  // vertices
+  static Flow identify_causal_flow(const ZXDiagram& diag);
+  // Attempts to identify a Pauli flow for a diagram
+  // Follows Algorithm 1 from Simmons "Relating Measurement Patterns to Circuits
+  // via Pauli Flow" https://arxiv.org/pdf/2109.05654.pdf O(n^4) for n vertices
+  static Flow identify_pauli_flow(const ZXDiagram& diag);
+
+  // Attempts to identify focussed sets according to Lemma B.10, Simmons
+  // "Relating Measurement Patterns to Circuits via Pauli Flow"
+  // https://arxiv.org/pdf/2109.05654.pdf
+  static std::set<ZXVertSeqSet> identify_focussed_sets(const ZXDiagram& diag);
+
+ private:
+  // Correction sets
+  std::map<ZXVert, ZXVertSeqSet> c_;
+  // Approximate the partial order by recording the depth from outputs
+  std::map<ZXVert, unsigned> d_;
+
+  // Solve for corrections using Gaussian elimination and back substitution
+  // Used within identify_pauli_flow
+  // correctors are those vertices which may be included in the correction sets
+  // preserve are those vertices which may not be included in the odd
+  // neighbourhood (unless being corrected) to_solve are those vertices that are
+  // yet to find corrections ys are all vertices with ZXType::PY The maps
+  // convert between row/column indices in the matrix and vertices in the
+  // diagram
+  static std::map<ZXVert, ZXVertSeqSet> gauss_solve_correctors(
+      const ZXDiagram& diag, const boost::bimap<ZXVert, unsigned>& correctors,
+      const boost::bimap<ZXVert, unsigned>& preserve, const ZXVertVec& to_solve,
+      const boost::bimap<ZXVert, unsigned>& ys);
+};
+
+}  // namespace zx
+
+}  // namespace tket
diff --git a/tket/src/ZX/include/ZX/ZXDiagram.hpp b/tket/src/ZX/include/ZX/ZXDiagram.hpp
index 084d46cf2b..32a61ee13d 100644
--- a/tket/src/ZX/include/ZX/ZXDiagram.hpp
+++ b/tket/src/ZX/include/ZX/ZXDiagram.hpp
@@ -20,9 +20,10 @@ namespace tket {
 
 namespace zx {
 
-// Forward declare Rewrite, ZXDiagramPybind for friend access
+// Forward declare Rewrite, ZXDiagramPybind, Flow for friend access
 class Rewrite;
 class ZXDiagramPybind;
+class Flow;
 
 class ZXDiagram {
  private:
@@ -166,6 +167,14 @@ class ZXDiagram {
   // Whether the diagram contains any symbolic parameters
   bool is_symbolic() const;
 
+  // Whether the diagram is graphlike (ZSpiders and H edges, Basics to
+  // boundaries)
+  bool is_graphlike() const;
+
+  // Whether the diagram is MBQC (MBQC, Inputs, and Outputs, Basic from Input, H
+  // otherwise)
+  bool is_MBQC() const;
+
   /**
    * Produces graphviz string, applying `highlights` to some vertices.
* Inputs: @@ -293,6 +302,7 @@ class ZXDiagram { friend Rewrite; friend ZXDiagramPybind; + friend Flow; private: /** diff --git a/tket/src/ZX/include/ZX/ZXGenerator.hpp b/tket/src/ZX/include/ZX/ZXGenerator.hpp index 28e300c036..154bacf091 100644 --- a/tket/src/ZX/include/ZX/ZXGenerator.hpp +++ b/tket/src/ZX/include/ZX/ZXGenerator.hpp @@ -41,6 +41,7 @@ enum class ZXType { * Symmetric generators */ // Z (green) spider + // Equivalently, a (postselected) XY qubit (with negative phase) in MBQC ZSpider, // X (red) spider @@ -49,6 +50,35 @@ enum class ZXType { // Hbox Hbox, + // A (postselected) XY qubit in MBQC + // Corresponds to a Z spider with negative phase + XY, + + // A (postselected) XZ qubit in MBQC + // Corresponds to a 0.5-phase (n+1)-ary Z spider connected to a phaseful 1-ary + // X spider + XZ, + + // A (postselected) YZ qubit in MBQC + // Corresponds to a 0-phase (n+1)-ary Z spider connected to a phaseful 1-ary X + // spider + YZ, + + // A (postselected) Pauli X qubit in MBQC + // Corresponds to a Z spider with phase either 0 (param=False) or 1 + // (param=True) + PX, + + // A (postselected) Pauli Y qubit in MBQC + // Corresponds to a Z spider with phase either -0.5 (param=False) or +0.5 + // (param=True) + PY, + + // A (postselected) Pauli Z qubit in MBQC + // Corresponds to a 0-phase (n+1)-ary Z spider connected to a 1-ary X spider + // with phase either 0 (param=False) or 1 (param=True) + PZ, + /** * Directed (non-commutative) generators */ @@ -71,6 +101,9 @@ bool is_boundary_type(ZXType type); bool is_basic_gen_type(ZXType type); bool is_spider_type(ZXType type); bool is_directed_type(ZXType type); +bool is_MBQC_type(ZXType type); +bool is_phase_type(ZXType type); +bool is_Clifford_gen_type(ZXType type); // Forward declaration so we can use ZXGen_ptr in the interface of ZXGen class ZXGen; @@ -140,6 +173,8 @@ class ZXGen { ZXType type, QuantumType qtype = QuantumType::Quantum); static ZXGen_ptr create_gen( ZXType type, const Expr& param, QuantumType qtype = QuantumType::Quantum); + static ZXGen_ptr create_gen( + ZXType type, bool param, QuantumType qtype = QuantumType::Quantum); protected: ZXGen(ZXType type); @@ -172,23 +207,42 @@ class BoundaryGen : public ZXGen { }; /** - * Implementation of ZXGen for undirected (commutative) generators. + * Virtual subclass of ZXGen for undirected (commutative) generators. * `std::nullopt` is used for ports as there is no need to distinguish. * If the generator is Quantum, all adjacent wires must also be Quantum. * If the generator is Classical, adjacent wires can be either Quantum or - * Classical. Each known generator only uses a single parameter. + * Classical. + * Implementations include PhasedGen for generators with 1 Expr parameter or + * CliffordGen for Clifford generators with 1 bool parameter. */ class BasicGen : public ZXGen { public: - BasicGen( - ZXType type, const Expr& param, QuantumType qtype = QuantumType::Quantum); - - Expr get_param() const; + BasicGen(ZXType type, QuantumType qtype = QuantumType::Quantum); // Overrides from ZXGen virtual std::optional get_qtype() const override; virtual bool valid_edge( std::optional port, QuantumType qtype) const override; + virtual bool operator==(const ZXGen& other) const override; + + protected: + const QuantumType qtype_; +}; + +/** + * Implementation of BasicGen for phased generators, e.g. spiders, Hbox. 
+ * Each generator has a single Expr parameter which is: + * - A complex number for Hbox + * - A real-valued phase in half-turns otherwise + */ +class PhasedGen : public BasicGen { + public: + PhasedGen( + ZXType type, const Expr& param, QuantumType qtype = QuantumType::Quantum); + + Expr get_param() const; + + // Overrides from ZXGen virtual SymSet free_symbols() const override; virtual ZXGen_ptr symbol_substitution( const SymEngine::map_basic_basic& sub_map) const override; @@ -196,10 +250,32 @@ class BasicGen : public ZXGen { virtual bool operator==(const ZXGen& other) const override; protected: - const QuantumType qtype_; const Expr param_; }; +/** + * Implementation of BasicGen for Clifford generators. + * The basis is determined by the ZX type, and the boolean parameter determines + * the discrete phase (false = 0 versus true = 1 half-turn). + */ +class CliffordGen : public BasicGen { + public: + CliffordGen( + ZXType type, bool param, QuantumType qtype = QuantumType::Quantum); + + bool get_param() const; + + // Overrides from ZXGen + virtual SymSet free_symbols() const override; + virtual ZXGen_ptr symbol_substitution( + const SymEngine::map_basic_basic& sub_map) const override; + virtual std::string get_name(bool latex = false) const override; + virtual bool operator==(const ZXGen& other) const override; + + protected: + const bool param_; +}; + /** * Virtual subclass of ZXGen for directed (non-commutative) generators. * The generator has a pre-determined number of ports labelled from 0 to diff --git a/tket/tests/CMakeLists.txt b/tket/tests/CMakeLists.txt index 527ff6bfc7..84b3c17691 100644 --- a/tket/tests/CMakeLists.txt +++ b/tket/tests/CMakeLists.txt @@ -36,12 +36,18 @@ ENDIF() add_definitions(-DALL_LOGS) +set(TESTS_FULL no CACHE BOOL "Run full set of tests") + +if (TESTS_FULL) + add_definitions(-DTKET_TESTS_FULL) +endif() set(TKET_TESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +include(tkettestutilsfiles.cmake) include(tkettestsfiles.cmake) -add_executable(test_tket ${TEST_SOURCES}) +add_executable(test_tket ${TESTUTILS_SOURCES} ${TEST_SOURCES}) target_link_libraries(test_tket PRIVATE tket-ArchAwareSynth @@ -59,10 +65,13 @@ target_link_libraries(test_tket PRIVATE tket-PauliGraph tket-Predicates tket-Program - tket-Routing + tket-Placement + tket-TokenSwapping + tket-Mapping tket-Simulation tket-Transformations tket-Utils tket-ZX) -target_link_libraries(test_tket PRIVATE ${CONAN_LIBS_SYMENGINE}) +target_link_libraries(test_tket PRIVATE + ${CONAN_LIBS_FMT} ${CONAN_LIBS_SPDLOG} ${CONAN_LIBS_SYMENGINE}) diff --git a/tket/tests/Circuit/test_Boxes.cpp b/tket/tests/Circuit/test_Boxes.cpp index 54903436c8..9d75f9efeb 100644 --- a/tket/tests/Circuit/test_Boxes.cpp +++ b/tket/tests/Circuit/test_Boxes.cpp @@ -380,6 +380,12 @@ SCENARIO("Pauli gadgets", "[boxes]") { Eigen::MatrixXcd u = tket_sim::get_unitary(c); REQUIRE((u - Eigen::Matrix4cd::Identity()).cwiseAbs().sum() < ERR_EPS); } + GIVEN("complex coefficient") { + Expr ei{SymEngine::I}; + PauliExpBox pebox({Pauli::Z}, ei); + Expr p = pebox.get_phase(); + REQUIRE(p == ei); + } } SCENARIO("box daggers", "[boxes]") { diff --git a/tket/tests/Circuit/test_Circ.cpp b/tket/tests/Circuit/test_Circ.cpp index 7d0f43f48b..af5c647588 100644 --- a/tket/tests/Circuit/test_Circ.cpp +++ b/tket/tests/Circuit/test_Circ.cpp @@ -1288,6 +1288,35 @@ SCENARIO("Test next slice") { } } +SCENARIO("Test next quantum slice") { + GIVEN("A simple circuit") { + Circuit circ(3, 1); + Vertex v1 = circ.add_op(OpType::X, {0}); + Vertex v2 = + 
circ.add_conditional_gate(OpType::Rx, {0.6}, {1}, {0}, 1); + Vertex v3 = + circ.add_conditional_gate(OpType::Ry, {0.6}, {2}, {0}, 1); + Vertex v4 = circ.add_op(OpType::S, {2}); + Vertex v5 = circ.add_op(OpType::T, {1}); + + auto frontier = std::make_shared(); + for (const Qubit& q : circ.all_qubits()) { + Vertex in = circ.get_in(q); + frontier->insert({q, circ.get_nth_out_edge(in, 0)}); + } + CutFrontier slice_front = circ.next_q_cut(frontier); + Slice sl = *slice_front.slice; + WHEN("The frontier is calculated from inputs") { + THEN("The first slice is recovered accurately.") { + REQUIRE(sl.size() == 3); + REQUIRE(sl[0] == v1); + REQUIRE(sl[1] == v2); + REQUIRE(sl[2] == v3); + } + } + } +} + SCENARIO("Test circuit.transpose() method") { GIVEN("Simple circuit") { Circuit circ(2); @@ -2580,11 +2609,17 @@ SCENARIO("Named operation groups") { Op_ptr x_op = get_op_ptr(OpType::X); REQUIRE(c.substitute_named(x_op, "group1")); + std::unordered_set opgroups({"group1", "group2"}); + REQUIRE(c.get_opgroups() == opgroups); + Circuit c2(2); c2.add_op(OpType::T, {0}); c2.add_op(OpType::CRx, 0.1, {0, 1}, "group2a"); REQUIRE(c.substitute_named(c2, "group2")); + std::unordered_set opgroups2({"group1", "group2a"}); + REQUIRE(c.get_opgroups() == opgroups2); + REQUIRE(c.count_gates(OpType::H) == 1); REQUIRE(c.count_gates(OpType::S) == 0); REQUIRE(c.count_gates(OpType::X) == 3); @@ -2618,6 +2653,8 @@ SCENARIO("Named operation groups") { Circuit c1 = c; REQUIRE(c == c1); + REQUIRE(c.get_opgroups() == opgroups2); + REQUIRE(c1.get_opgroups() == opgroups2); } GIVEN("Negative tests for operation groups") { Circuit c(2); diff --git a/tket/tests/Graphs/EdgeSequence.hpp b/tket/tests/Graphs/EdgeSequence.hpp index 88747bafd9..05c88a9617 100644 --- a/tket/tests/Graphs/EdgeSequence.hpp +++ b/tket/tests/Graphs/EdgeSequence.hpp @@ -18,6 +18,8 @@ #include #include +#include "Utils/RNG.hpp" + namespace tket { namespace graphs { @@ -25,8 +27,6 @@ class AdjacencyData; namespace tests { -class RNG; - /** * For having a whole sequence of checked edges * to add to a graph in a specific order, diff --git a/tket/tests/Graphs/RandomGraphGeneration.cpp b/tket/tests/Graphs/RandomGraphGeneration.cpp index af78421259..45c2923d68 100644 --- a/tket/tests/Graphs/RandomGraphGeneration.cpp +++ b/tket/tests/Graphs/RandomGraphGeneration.cpp @@ -19,9 +19,8 @@ #include "EdgeSequence.hpp" #include "Graphs/AdjacencyData.hpp" -#include "RNG.hpp" +#include "Utils/RNG.hpp" -using std::size_t; using std::vector; namespace tket { diff --git a/tket/tests/Graphs/RandomPlanarGraphs.cpp b/tket/tests/Graphs/RandomPlanarGraphs.cpp index 065200cc2c..406e6fc01d 100644 --- a/tket/tests/Graphs/RandomPlanarGraphs.cpp +++ b/tket/tests/Graphs/RandomPlanarGraphs.cpp @@ -16,9 +16,8 @@ #include -#include "RNG.hpp" +#include "Utils/RNG.hpp" -using std::size_t; using std::vector; namespace tket { diff --git a/tket/tests/Graphs/RandomPlanarGraphs.hpp b/tket/tests/Graphs/RandomPlanarGraphs.hpp index 6a1f047d17..b29b159ad2 100644 --- a/tket/tests/Graphs/RandomPlanarGraphs.hpp +++ b/tket/tests/Graphs/RandomPlanarGraphs.hpp @@ -18,12 +18,12 @@ #include #include +#include "Utils/RNG.hpp" + namespace tket { namespace graphs { namespace tests { -class RNG; - /** * For testing purposes only, not of much independent interest * (and definitely an inefficient implementation). 
diff --git a/tket/tests/Graphs/test_GraphColouring.cpp b/tket/tests/Graphs/test_GraphColouring.cpp index 0758357ff3..18b4016122 100644 --- a/tket/tests/Graphs/test_GraphColouring.cpp +++ b/tket/tests/Graphs/test_GraphColouring.cpp @@ -19,18 +19,16 @@ #include "GraphTestingRoutines.hpp" #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphColouring.hpp" -#include "RNG.hpp" #include "RandomGraphGeneration.hpp" #include "RandomPlanarGraphs.hpp" +#include "Utils/RNG.hpp" using std::map; -using std::size_t; using std::vector; namespace tket { namespace graphs { namespace tests { -namespace test_GraphColouring { SCENARIO("Test many colourings: random trees") { RNG rng; @@ -363,7 +361,6 @@ SCENARIO("Test Mycielski graphs") { test_Mycielski_graph_sequence(graph, 2, 9); } -} // namespace test_GraphColouring } // namespace tests } // namespace graphs } // namespace tket diff --git a/tket/tests/Graphs/test_GraphFindComponents.cpp b/tket/tests/Graphs/test_GraphFindComponents.cpp index bd9b61db06..059f3d8ff0 100644 --- a/tket/tests/Graphs/test_GraphFindComponents.cpp +++ b/tket/tests/Graphs/test_GraphFindComponents.cpp @@ -17,17 +17,15 @@ #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphRoutines.hpp" -#include "RNG.hpp" +#include "Utils/RNG.hpp" using std::map; using std::set; -using std::size_t; using std::vector; namespace tket { namespace graphs { namespace tests { -namespace test_GraphFindComponents { // For testing the connected component function struct ComponentsTestData { @@ -185,7 +183,6 @@ SCENARIO("Correctly calculates graph components") { } } -} // namespace test_GraphFindComponents } // namespace tests } // namespace graphs } // namespace tket diff --git a/tket/tests/Graphs/test_GraphFindMaxClique.cpp b/tket/tests/Graphs/test_GraphFindMaxClique.cpp index 048efdd595..38c1b00d01 100644 --- a/tket/tests/Graphs/test_GraphFindMaxClique.cpp +++ b/tket/tests/Graphs/test_GraphFindMaxClique.cpp @@ -18,16 +18,14 @@ #include "Graphs/AdjacencyData.hpp" #include "Graphs/GraphRoutines.hpp" #include "Graphs/LargeCliquesResult.hpp" -#include "RNG.hpp" +#include "Utils/RNG.hpp" using std::set; -using std::size_t; using std::vector; namespace tket { namespace graphs { namespace tests { -namespace test_GraphFindMaxClique { struct MaxCliqueTestData { vector> raw_adjacency_data; @@ -256,7 +254,6 @@ SCENARIO("Correctly calculates max cliques") { CHECK(cliques_seen == 160); } -} // namespace test_GraphFindMaxClique } // namespace tests } // namespace graphs } // namespace tket diff --git a/tket/tests/Placement/test_NeighbourPlacements.cpp b/tket/tests/Placement/test_NeighbourPlacements.cpp new file mode 100644 index 0000000000..a167c6ea35 --- /dev/null +++ b/tket/tests/Placement/test_NeighbourPlacements.cpp @@ -0,0 +1,147 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
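+
+// The scenarios below exercise NeighbourPlacements::get(dist, n, optimise,
+// seed), which, as called here, returns a list of candidate placements, each
+// pairing a new qubit_mapping_t with the sequence of architecture swaps that
+// realises it, e.g.
+//   auto res = np.get(/*dist=*/2, /*n=*/1);
+//   auto [new_map, swaps] = res.front();
+// The parameter names above are descriptive labels inferred from these call
+// sites rather than taken from the class declaration.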
+ +#include +#include +#include +#include + +#include "../testutil.hpp" + +namespace tket { +namespace test_NeighbourPlacements { + +using Connection = Architecture::Connection; + +SCENARIO("class NeighbourPlacments") { + GIVEN("a realistic-ish instance") { + Architecture arc( + {{Node(4), Node(5)}, + {Node(5), Node(6)}, + {Node(6), Node(7)}, + {Node(5), Node(7)}}); + qubit_mapping_t map( + {{Qubit(0), Node(4)}, + {Qubit(1), Node(5)}, + {Qubit(2), Node(6)}, + {Qubit(3), Node(7)}}); + NeighbourPlacements np(arc, map); + + WHEN("Getting a placement dist=0") { + auto res = np.get(0, 1); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("The resulting map is identical") { + for (auto [k, v] : map) { + REQUIRE(new_map.contains(k)); + REQUIRE(new_map[k] == v); + } + } + } + + WHEN("Getting a placement dist=2, optimise=true") { + auto res = np.get(2, 1); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("The results are valid") { + REQUIRE(new_map.size() == 4); + REQUIRE(swaps.size() == 2); + for (unsigned i = 0; i < 4; ++i) { + REQUIRE(new_map.contains(Qubit(i))); + } + } + THEN("The resulting map is correct") { + REQUIRE(new_map[Qubit(0)] == Node(4)); + REQUIRE(new_map[Qubit(1)] == Node(7)); + REQUIRE(new_map[Qubit(2)] == Node(5)); + REQUIRE(new_map[Qubit(3)] == Node(6)); + } + THEN("The swaps are correct") { + REQUIRE(swaps[0] == std::pair{Node(5), Node(7)}); + REQUIRE(swaps[1] == std::pair{Node(5), Node(6)}); + } + } + WHEN("Getting 10 placement dist=3, optimise=true") { + auto res = np.get(3, 10); + THEN("There are 10 resulting placements") { REQUIRE(res.size() == 10); } + } + } + GIVEN("the simplest possible instance") { + Architecture arc(std::vector>{{Node(0), Node(1)}}); + qubit_mapping_t map({{Qubit(0), Node(0)}, {Qubit(1), Node(1)}}); + NeighbourPlacements np(arc, map); + WHEN("Getting a placement dist=2, optimise=false") { + auto res = np.get(2, 1, false); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("Both swaps are identical") { + REQUIRE(swaps.size() == 2); + REQUIRE(swaps[0] == swaps[1]); + } + } + WHEN("Getting a placement dist=2, optimise=true") { + THEN("Con only find a solution with dist=1") { + auto res = np.get(2, 1, true); + REQUIRE(res.size() == 1); + REQUIRE(res.front().swaps.size() == 1); + } + } + WHEN("Getting two placements of dist=1") { + THEN("Can only find one result") { + REQUIRE(np.get(1, 2, false, 100).size() == 1); + } + } + } + GIVEN("an instance with unlucky seed") { + Architecture arc({{Node(0), Node(1)}, {Node(1), Node(2)}}); + qubit_mapping_t map( + {{Qubit(0), Node(0)}, {Qubit(1), Node(1)}, {Qubit(2), Node(2)}}); + NeighbourPlacements np(arc, map); + + // find unlucky seed + unsigned seed; + for (seed = 0; seed < 10; ++seed) { + auto res = np.get(2, 1, false, seed); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + REQUIRE(swaps.size() == 2); + if (swaps[0] == swaps[1]) { + break; + } + } + THEN("There is an unlucky seed") { REQUIRE(seed < 10u); } + + WHEN("Getting a placement dist=2, optimise=false and fixed seed") { + auto res = np.get(2, 1, false, seed); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("Both swaps are identical") { + REQUIRE(swaps.size() == 2); + REQUIRE(swaps[0] == swaps[1]); + } + } + WHEN("Getting a placement dist=2, optimise=true 
and fixed seed") { + auto res = np.get(2, 1, true, seed); + THEN("There is a single result") { REQUIRE(res.size() == 1); } + auto [new_map, swaps] = res.front(); + THEN("Both swaps are now different") { + REQUIRE(swaps.size() == 2); + REQUIRE(swaps[0] != swaps[1]); + } + } + } +} + +} // namespace test_NeighbourPlacements +} // namespace tket diff --git a/tket/tests/test_Placement.cpp b/tket/tests/Placement/test_Placement.cpp similarity index 91% rename from tket/tests/test_Placement.cpp rename to tket/tests/Placement/test_Placement.cpp index 4dc40f9db0..2167f8db92 100644 --- a/tket/tests/test_Placement.cpp +++ b/tket/tests/Placement/test_Placement.cpp @@ -15,8 +15,8 @@ #include #include -#include "Routing/Placement.hpp" -#include "testutil.hpp" +#include "../testutil.hpp" +#include "Placement/Placement.hpp" namespace tket { namespace test_Placement { @@ -434,16 +434,6 @@ SCENARIO("Check Monomorpher satisfies correct placement conditions") { } Monomorpher morph(test_circ, arc, {}, {10, arc.n_connections()}); - /*std::vector results = morph.place(1); - THEN("The circuit is placed in the highly connected region.") { - std::set middle_nodes = {5, 6, 9, 10}; - for (auto map : results) { - for (auto mapping : map.map) { - REQUIRE(middle_nodes.find(arc.map_node( - mapping.second)) != middle_nodes.end()); - } - } - }*/ } } } @@ -497,9 +487,68 @@ SCENARIO( REQUIRE(potential_maps.size() > 0); } } +SCENARIO("Test NaivePlacement class") { + Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}}); + GIVEN( + "No Qubits placed in Circuit, same number of qubits and architecture " + "nodes.") { + Circuit test_circ(7); + NaivePlacement np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + REQUIRE(p[Qubit(0)] == Node(0)); + REQUIRE(p[Qubit(1)] == Node(1)); + REQUIRE(p[Qubit(2)] == Node(2)); + REQUIRE(p[Qubit(3)] == Node(3)); + REQUIRE(p[Qubit(4)] == Node(4)); + REQUIRE(p[Qubit(5)] == Node(5)); + REQUIRE(p[Qubit(6)] == Node(6)); + } + GIVEN("No Qubits placed in Circuit, less qubits than architecture nodes.") { + Circuit test_circ(6); + NaivePlacement np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + REQUIRE(p[Qubit(0)] == Node(0)); + REQUIRE(p[Qubit(1)] == Node(1)); + REQUIRE(p[Qubit(2)] == Node(2)); + REQUIRE(p[Qubit(3)] == Node(3)); + REQUIRE(p[Qubit(4)] == Node(4)); + REQUIRE(p[Qubit(5)] == Node(5)); + } + GIVEN( + "Some Qubits placed in Circuit, same number of qubits and architecture " + "nodes.") { + Circuit test_circ(4); + test_circ.add_qubit(Node(0)); + test_circ.add_qubit(Node(1)); + test_circ.add_qubit(Node(2)); + NaivePlacement np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + + REQUIRE(p[Qubit(0)] == Node(3)); + REQUIRE(p[Qubit(1)] == Node(4)); + REQUIRE(p[Qubit(2)] == Node(5)); + REQUIRE(p[Qubit(3)] == Node(6)); + REQUIRE(p[Node(0)] == Node(0)); + REQUIRE(p[Node(1)] == Node(1)); + REQUIRE(p[Node(2)] == Node(2)); + } + GIVEN("Some Qubits placed in Circuit, less qubits than architecture nodes.") { + Circuit test_circ(2); + test_circ.add_qubit(Node(0)); + test_circ.add_qubit(Node(1)); + test_circ.add_qubit(Node(2)); + NaivePlacement np(test_arc); + qubit_mapping_t p = np.get_placement_map(test_circ); + + REQUIRE(p[Qubit(0)] == Node(3)); + REQUIRE(p[Qubit(1)] == Node(4)); + REQUIRE(p[Node(0)] == Node(0)); + REQUIRE(p[Node(1)] == Node(1)); + REQUIRE(p[Node(2)] == Node(2)); + } +} // Tests for new placement method wrappers - SCENARIO( "Does the base Placement class correctly modify Circuits and return " "maps?") { diff --git 
a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp new file mode 100644 index 0000000000..4ed7727aa1 --- /dev/null +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.cpp @@ -0,0 +1,2644 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "FixedCompleteSolutions.hpp" + +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +FixedCompleteSolutions::FixedCompleteSolutions() { + solutions["IBM q16"] = { + "1e:2d:3c:4b:5a:69:8:8:9:a:b:c:d:e", + "5634592312451dde_0346", + "7889781d012312de_136789d", + "45345956ab23bccd4534_456a", + "ab1d9acddebcabcdbc1dcdbc_19ae", + "8968cd1d78bcab899aabcdbc9a_179d", + "bc1201ab230e3b129aab897801_123c", + "1d01bccd12bcab239a89ab783445_01ab", + "1d6823349a1259decd2312ab45_23589de", + "9a0e890168bccd89781201230e_2567abe", + "4a891245ab239acd8978de1d12_0189abcde", + "2c56124523593478bcdecd89bcabbc_125679e", + "1d56781223de595645342c3babbccd_124679abcd", + "562368bc45cd34de2c2312ab567889453445_1235678bc", + "ab129a89234a9a78bc01566889abcd9ade2c452334234578", + "de2334abbc9aab5956788901cd1d12bc9aabbc452ccd1d56", + "bc5623349aab2c121d0ede3b23124534452312ab56_12369abcde", + "231dcd9a784a45899a893bab4ade45bc78345645342312cd1dde0e", + "4a6878cdbcab451dde9acdabbc2ccd89ab9a89de780e_013456789bde", + "1245bc1d9aab899a7859232cbcab4a34cdbccddecd120112_135789bcd", + "3468233bbc12459a0134ab0e23899a347845bcabcd1d01bc_2345679bc", + "59231dcd5678348978459abcab0edebccdbc1d0134ab9aab_01234789b", + "7823ab4a9a0e121d4a3bab01899aab891223bccdde34cdbc_1234578bce", + "34452c9a2389567812340e011dde2345340e011223344aabbccd569aabbc", + "bc011dcd2cab3b9aab9a683b560e124523344512bcab01789a899a78abbc", + "bc012c1289ab01599a89595678bc59ab9acdbcde34cdbcab9a8959453423", + "3b34452c12abbc68012378120e9a891d56ab9a238934234512_0123678bde", + "560e1dbc45abcd68019a788978129a4ade23345645345623_013456789cde", + "010ebc2cab9abcab2cbc689a34125678452301683412899a23_01236789acde", + "12780e2359de569a3b34599acd0112abbccdab2301899a89de_012345678abe", + "1d347812cd3b0e230112de34892334450e34cdbc56abbc59cd9a8978_01247de", + "2cab3b0123cdde78123459ab459a5634ab8923bc9aab9a8912cd2c_023679bcde", + "34781d23129adeab68bccd899a56784534bc23120112ab9a23ab897889_46789de", + "894acdbcab9ade8901bccdbc12deab569a7889789a4a342312012334_034578bde", + "1dbc234a9acd3b0eabbcab9acd687889de9aab3b2c340112231201_01234569acde", + "4a9a68122356bcab0189de12239a01ab89780e89cdbcabcd9a894578_02346789be", + "562c12592389bcab34cdbcdecd9a0189123bbc01abcd9ade78893bcd_01234589acde", + "bc9a23ab9a12cd3b237834ab899ade12abcd4556bc3b013412231201_01234579abcd", + "0e9aabcd3b2368ab899aab89bccd788934ab9a128978234aab3b12_0123456789abcde", + "123b9aab1d893423bc019aab1278688959344501342cbc9aabbc9a_0123456789abcde", + "89abbc9acdab3b2334de4556129aabbc2c2334455668ab9a34780123122334899a01ab", + "2312232cbcab4acdde4534687856019a45bcab23bc349acd8912bcabbc2c233445599a8" + "9", + 
"2359684a2c23cdbcabcd9a890ede1dbccdbcab9a7868899a4a4578342312453401231223" + "34", + "1d3b2c123401231245234a3489232ccdbc68deab789a899aab78bccddecdbc_" + "01235789bcde", + "9a2c233b89ab34decd1d12233b9aab9a5645bc122ccdab9a6878899aabbc_" + "0123456789abcde", + "4a9a344a5678128978239a1268bc3b344ade230145122334452312010e011223_" + "012345679abe", + "4a34ab9a122ccd568923011278cd23bcab3b349a45ab34bccd2312010e0112233445_" + "012346789acd", + "890ede4534abbc9a897823563bcd01de89abbc0e129a45ab23344523bc9acd1289bc_" + "01356789abce", + "01893445239a89120e78342345ab89681d12bc0112233423121dcddebc56ab9aabbccdbc" + "ab_0245678abcde", + }; + + solutions["IBM q20"] = { + "15:26:36:89:89:6ab:7ab:8cd:9cd::bf:cgh:dg:eij:ij:g:h::", + "5a67bg056a6b48cdaf8cbcab_047acg", + "16676adi5b7d678c12cg26fgbg_1258ai", + "af6abccdbcdj5bbg267d671601_6bdfgj", + "16de677ddjcd5b8cbc89abbc8ccg_1589ce", + "6b26de6b7dbc6756abdicd67decg_6bcdei", + "78dj67167dbc8c67dicgfgcd56cgcddjcd_8bfgij", + "5a7ddecddeejafghbc2648cg058c5bbh898c5b_02457cdehj", + "678d7d1612dedj38487d6b6a6701di781667cdcgfg_123467adij", + "fg26ei05bh5b56167ddj6a671678cg6a7d0148cdbg_012abdefgj", + "cg67bcaf6b16dj7ddibh5aej677c7d016716266b233949_0127bcefij", + "238d26055b6789di16677d788cbcbgdjcdaf38fgghcg48djghdecd23de", + "6b382326677dgh5605abdifg89168d48dj8c8d67bcde7d5babdj056789", + "cd5b38dj39bc67ej8c6a2367168d7d67de12af89488ddi7d_13479abdej", + "23cg567d67de268ddjbg6a898d7d671601di051248237ddj_0123567abegj", + "266bbg166a89bh67deaf8dab5b48di78057d23bh26di6b6726_12359aefhi", + "8d6abh3916cddj48238c8dbc7d38di7d8ccg5bfg05af6a26_146789cefghj", + "ab67cg7dbc5b5667dj1612gh0178cdcgfg8ccgbg6b67cd38di8d4948de8d38", + "126a56388daf5aei380548djbg8c5a678d49896bbgcg7dde_1345689abdefgj", + "26gh78ej67ab3923018d7d488c5b16cgbcaf5b67cddi1612cd_0245689adefg", + "78676adeghcgcdbcei268967dj8ddj38488cfg23cg8c4889fg_024789befhij", + "fg16de6bbc5b8ccg238d05488d8cbcdecd3926896b2623cgdi_3456789bcefj", + "6bbgfg055bbc267c7dcgde7dab8cbcab6a677d2612di488d23gh8939fgdj8ddj", + "678d78ghdjej6a89fg8dcgfg163805af0167di7d67di568c48_1456789defghij", + "6b5b16cgbc8ccd67de48difgcd3923cgghfg6a8ccddj057d67dj7d6726_15689abcdfij", + "8c67486b23268d5aghbhbcde488ddj56af7ccdcgdi39232623cddicd_345789bdefghij", + "8cbhbcde38ab6bbgbh26237c167d6739567d16di8dde2312012338_012346789bcdeghi", + "6716dj6a387d26de6b67788caf6acd2389difgcg8c893923266a48ghcgcd8d485bbc5b0" + "5", + "6b26896a8c7867166abh48bccddj1638cgbh7d01di8dfgcg1223dedi12_" + "013579abcdeghi", + "cgcddi8cgh12bccd2301decgcdab89fgcgbgdj8ccd4805djab786726_" + "01356789abcefghij", + "6a266abhaf677d48cdbcdiab16675a7dcddecd238c2656bc5b05265b23bc_" + "1234579acdfghi", + "6a6b268d8c67896adj05fgaf8c7dcg6ade671667cdbgdi8dbc7c488ddj_" + "0123678abcdefhij", + "cg26gh78bh8dde126b2339abaffg6a388c481201dj8cbc8ccddiabbccddj_" + "12348abdefghij", + "48deej5b566a8d8ccgbc7ddifg7d67bh2623398978055b16cg488cbh236a8ccgfgaf6a16" + "1223", + "8cbc6726388c895b7d677d160105de6a7dgh67fg7d8ddjcggh4838cgde_" + "0123456789bcdefhi", + "di381623385b6abc05676aabcd5baf89djbccd5adebhbc8dafde488cbc_" + "0135689abcdefghij", + "16126b01bg16677dbhdifg8cbcab6a677dde8939238ccg4849398ddjfgaf6a677dcdbcbh" + "dj5667", + "8d676abc38dj8d488cde167ddjdi67cgfgaf5a566bcd7d89cg8c1689di_" + "0123456789abcdefghij", + "8d78dibc016b23895bbh5acd128ccg89af48fg6701dj382326678dde89cgcd48_" + "02345789abcdefghj", + "163839cgcd7c67dj6a7d6701di482612ghcgcd7d676bde8d7d2339264849de_" + "0123456789abcdefghij", + "267d675b566a39djbc7ddj23bhdefg8ddi673938787dcd12bc67dibg2305cddefg_" + "025679abcdefghij", + 
"5a56djbgcd05defgbgcg8c2367ghcg8c7d4867gh125623786789dide39238dde8d_" + "01245689abcdefhj", + "bcdi5b7c78cg670516cd7dcg5a566748de388ccgafaffgcg8c8d7d67dj2638_" + "0123456789abcdefghij", + "38268ddj05167dbc6bbggh48de01cddeei6bbccg8cfg5a895bbc8c05382326675bbg_" + "013456789befghij", + "bc165a055b5a6a8c67ghcg38122667fg7838898c8dbcdicddjcd48cgfgaf8c89abbc_" + "012345789abcdefhi", + "5a6acdcgbc2326fgeidj49677ddjcd05decdcg5667fgafcddjcdbcbh8c6b3823266bbc_" + "0245678abcdefghj", + "cd676a2678676bbgcg385afgcgghdicg7d7838dj3948238c8d395bbccgfgaf05264856_" + "0123456789abcdefghij", + "67486b26di7ddjbhbc67166a7d677ddecd8d26bc895bcg238ccgfg26bc5bcg8c6a480526" + "238ccg_02345679abdefghij", + "bc7ccg2348677d67bh38djabcdcgfg56de67787d671667bhgh12di488d23388d7d675605" + "01dj_0123456789abcdefghij", + }; + + solutions["Triangular grid 3x3"] = { + "145:256:367:7:589:69a:7ab:b:9cd:ade:bef:f:d:e:f", + "89de498c9e_49ef", + "27129d5956ab_7bcd", + "de26af596b6a9e_25ef", + "156aaf1267377b9a_35af", + "23126aae6a0104488c_36ae", + "26aeab5a7b371559159d_12be", + "9aaf455a8dae15ab_569bcdef", + "8d9aab151248011604ae67de8d6a_024589de", + "9dab7b9a8927239e6aaf49bf1226_237bcdef", + "378c046b56ae7bbf167b6a26ae37_012346789be", + "af1237488d5a0115568c674556899aab7b_234678cf", + "9a158912deab5a59157b4556cd268c4823_12478abcd", + "cd59ab157b459d23af591215595a055a_1345679acdef", + "23055915489e5912238d0545566701164548_0135789ad", + "04379aab48595a45ae5aefde1601ae121623_0134569bdef", + "1267566759159d49af23decd1259012312de01_023579def", + "129a9d59abbf89159a9e8c5912897b055956_24589abcdef", + "7bbf5a9a37497bef15debf48efcdde45569e45_134789abf", + "9aab9d7b0104124837679a8915569d37decd_0345678abde", + "1589459aabaede265aafae12cd675a5667230459_024578bde", + "9d26046a5923bf1612567b01bf7b6756599e49489e9dcd0448", + "6756bf234559019eef12de152712236a2659156a59_03579abe", + "157b8912bfde9e23ef7b04018c4804168c0112_01235678bcdef", + "6a04aeef375667562659566a898d8c01ab9a4948ab040126488c", + "0559de898c6a9abf237baf48040104122315599d5915_35689cdf", + "2327898c7b48ae451516debf458d5a4804016712_012346789cde", + "27deef561523ae1201ab456b56450448049a9d9a_01234589abcd", + "05ef166756deabae6b8d8c896a9a67de3704010412_0134578bcdf", + "5956af2645238c5a489d055aae4515591215459d488c_34569cdef", + "598948158cef599ede8948bf9a7b59ab56ef011601_012456789cdef", + "9aae26048c49486b012316456a26569d67599d6bbf04_012389abcdef", + "05015a45491216678904bf4856599daecd236baedecd8c480401162623", + "455659167b9d23376b9a16480412010448afef7bbf7b_012345689abef", + "5626129a238c455645488ddeefaf04ab486726120104488ccddeaeab23", + "45566a4849efab5a9a01059dabbf16128c9a4823ab9a59151223379d4548", + "5a2701af23160459125a4801041201122312155aabaedecd_01234589bef", + "239a01049d599e4856261227010449af8c899a9eab7b377b_013456789ace", + "5956157b26deabbfefde9d899a89046a262337011201_0123456789abcdef", + "45488c125a451516ab9d9a48ab45567baf233767ef564548_0235678abcdef", + "591215de9ecdefab595aab8956672712014556162359129ede15458948ef23", + "895945ae568c056aab9d2656454867569a67239d6a266a_0123456789abcdef", + "9a9d049a9eefdeab498c480104488ccddeae6a26231215_0123456789abcdef", + "48abde0515129a8c23af899a5a4515596b679d594556126756_123456789bcdf", + "9e04499d599a5667898c59af9d89377b1237019a6a9a04480401_23456789acde", + "6a9a9e8d057b27ab48899a5a057b377baf8c899a8cabaf7b37_0123456789abcdef", + "5a1245af5605016b04599e4989566704239a12159d599d151223_0123456789abcdef", + "1227238c0115045a5648ef12016bde2312049e488c480159566bbfef_01234569abcef", + "454856ab6b598c159d7b9a5aaf0112232689565aafbf7b671501599d_" + 
"0123456789abcdef", + }; + + solutions["Triangular grid 4x4 with 2 stacked layers"] = { + "156p:267q:378r:489s:9t:6abu:7bcv:8cdw:9dex:ey:bfgz:cghA:dhiB:eijC:jD:" + "gklE:hlmF:imnG:jnoH:oI:lJ:mK:nL:oM:N:quv:rvw:swx:txy:y:vzA:wAB:xBC:yCD:" + "D:AEF:BFG:CGH:DHI:I:FJK:GKL:HLM:IMN:N:K:L:M:N", + "xD78flwxKLci7c27chDIsxfgij7cGMAB7w8e3shi67yDBHiHgh56fkfgHNfEBHtywB7drwwB" + "uvvBcB_67fghijuwADGILN", + "vw2334FK125u010506wx7cdi7823gF12AFbcab0p1qxy7cwxgh9y6bEJ4t166bABvwzEuzch" + "vAbAuvvwrwrs3s_2346abcgiuvwyJK", + "56lKfkGLqvFK78afGHcidi5u67FG56165a122305HIioGHAGbcaf1qgmglmL0pabbg6bbgty" + "pqyD06eDqr7889bA_1568dgikltuvIKL", + "BGab16zEfkbc6bbhAFabEJcBhmFKstcdfgrs2706uzchCDghzEglcBpuBCqrvApqgFuzcBxC" + "hGpuqrpq5urs0p3s_12acdhkmtDEFGJK", + "6bFKJKuzBHgFGMKL5a6706ghvBBGHNcBLMlm569edediFGqvBH3423hiwBijGH0pghgF7827" + "hiuvbhHNrwvwwxsxst_479abegjlzHJKMN", + "AB05GHch2riopv7c5aHNdi8d27invBFKBHLM7dDIyDvB9yafdj23didCoNFGuvfkvwxCwCdC" + "BGwBrwBGrsst5awCsx56677wwx_02fhnopruABGIKM", + "AFfgcdxCnopqgh8dqwrsfknMfg8x12syhGgFvAglAFbgbc1qbgqrwBpuklcdsxLMzEqv38BC" + "AB898dgF8xHMINEJzAvA5uzEqvqwhiijhi_189cdfhikloqrswxzABCFGLMN", + "7drwCI5638azGL67sttyAB0p7wwx9eabsxdCbcBHflvB4t6vejjI9ystwBHI5abAsyci06jo" + "qvvABGGLGHCHHM8ezFqvBC7c8xFGaf5aafEF7dEJch8d67_" + "056789adejlqrstwxyzBCHILN", + "BG89012717788d7c67fgdjchFG56zEhG38agCIcdchjIghagEJzAzEqrMNnM7wbAxC05AG6c" + "ioci7c6c2rglbgsxxC6b5auzmnuvHM050127BCbgGMhnAGABnogl0pBCpuCH_" + "0125679abcdfhlnorsBEFGIJN", + "FK06qrCDbh6cafyD5b17vAAFabvw5uGLFKwCcBch23cBzE8e3427rs28tylK38BGCDgmyDBC" + "xCFKditystagbcABcd12iHgm7chmdezA5601CIcB787cchhGbcGLDIABINoN12BCdisxzExC" + "AB_02347cdefghmqtvwyBCDEFHIL", + "GHGMnM49wxghhG9eCDlmqvejpq49xyFLio6cbhhi8eAFqrvAEJ288xghijiHHIfk165a233s" + "rsAFHMagafINkJdigF67561qfE6b8d39tyab9e16ejwByDbcjo3styrwdi0667rsejBHrw7d" + "deLM_023457bcegjklmnopqstwyBDEGHILM", + "6cnoch8eijFGdCqruzuA5umnABglafBCGHHNhGEFbhCIqvxyhiAFjIBCghejvAFKaz7dAFiH" + "34zAjoABBC787waflKhifkoN89di8dvwwx2823uviHdi8d34rxhighfgfkFK277cvw4tdjgh" + "uvch7cuzuvkJfk_246789cdefhijloqruyABGHIN", + "abdehGAB2723xybc7wcicBiowBafhiBHEJzErw9y01uzdi8dzFstazHNhiFG0pghGMiHrszA" + "EFvwEJwBBCxCmn05rwwBvwvAFGxD5a7cqvciqrCHsy01lmCIlK6cuvvw7czAwxrsHMfgxyvw" + "qraz122334nM01dCBGGLmL_12359aefhnrtvwxyACFGHIJMN", + "7w7c6cGH6bgmbgFGafdicikJGMnoEFdeqrdCpvgl34io5bbArsvBijFGBHBCCDDIuzzFEJbh" + "abqr129eHNFLbc288xazGH27stabyD237dBGdefk345bhipqafEF23pu5a05dCFGEFBC7dgh" + "BGuz6756EJBCzE7duz_145679cdefghijkmopqsuwBCDEFGIJ", + "KLvAFG28pvci7cEFqrbcwC5bbhbABGbgagFGfl8diodCEFGHGLBG12vwhnhi23gmnMHIEJCH" + "276vfE39zEuzgFwBcB7cinBCgm3sghlmfgrsdistghchbcHNioBHrsAB8dzAHNklABcBrwwB" + "bccdmn5blmdenooNnomnin_235789abfghiklmopqvwABCEFJKLMN", + "qvvBqrfknoFK3967nMAFeDBH78EFrspvqv8evAwBAFbAbgqw3sDIgh0paglKHNFKHIGHhifE" + "hnGLAGBG9yyDch3423BCbAvwvAABBCcd5bCIbc7caz7wjohmch6ccdxCabjI277cvwDI6vbA" + "tychhm4tch7cwxyDty_234689abdeghkmnopqrsuwxyABCDEFGIKLM", + "AB6vBCazhiAF068eeDCDdCBGGLiostABdi2805ghhiwB0p7dgFhGbAoN5a4twxbcuzdjej7w" + "jopq67cB78rs7dab12rwfguv3svwbg01bc12uvdiEK8956st8dcdfkKLfgBCLMbgdCglrwrs" + "KLin16di122312rwbc16vBpvdevBcB_013568abcdefghijknopstwyzACDEF", + "xDxC6v56gFHIcdxy7cBCHN0p6b78eD67896bhntyab6bvwwxnoFG7838xyBGwB3sdjEFpvFG" + "67bgnMoNhGgmrwBG05345b78vBxC05BCFKFGbgEFGHBGindivB67FLGLfE6bJK4tbginxy9y" + "xyxDglEFuvbg6bwxvw67wxGMFG_0346789abceghimopqrtvxyzABCDEFHKLMN", + "uzwxDIzE5bjIpuhicisx0phGGLAGbh7cabaz1278hnzF23bc27rsFKoNFG7dcdsyyDstab34" + "affk27tyfEvwbh890178MN05io5a5bbhchwCazdeBC67zErw7wuzEJzE7dyD23fl12cdpuuv" + "pqBG230pde89061617oN6bbggF6b_0234569abcdefhikmnoprstuxzBCDEFIJKL", + 
"ch56qwvBBCwCuv89fkDIst6vABgFcddedChnBCyD67nMhmlm5aazAB01AGtywx6bGH3s05pu" + "iHjI4tCD12CHFGhiGHij16GLfghivBxC23lK5ubgHIpugl5b56bgBH395alKmngmghfgnoEJ" + "IN16stjIHNsx23122rBHvBxCzECDuz6bpuuzzEbh_" + "0235679acfhijklmpqrtuvyABCDFGHIJKMN", + "bgrwvwqvsxuzBGzE01azGLvAxCqvHMCHiHnMhGglbgxD6bDIstabrxafoNJKAFuzFK7cfElK" + "qrrxsxfkhi67mnGHHNghsy78pq343sqrhirs23fE126vzEqrsy01uv5aci05qvuz01iooN23" + "eDuvcixCuz12BHqvqrrsqr05qv5avBBHzECHxCstHM9e49_" + "0457abfiklnopqrstuvwyDEGHIKLMN", + "676bGMAGABBHhi6v01uA0pdiABBC5aafbgabzAhGhnnMAGiHGHfEEJEK8dst38djglzEgF8d" + "785ursFG7wCDbcqw89uvGL06HNuzdizEbgEF8x8dvwfg38BGFGhGdiwxCIcBghxywxrw177c" + "6cvw27ciGHchhGjIghcdwCfg06decd7cGHhmhiijhi_" + "12356789adfghijmnoptuvwABCEFGHIKLMN", + "AF67fguvuAvwFKpu2756050ppvxD676cchAFbAABsx2rej8e8xwxwBcBciJKBCGLFLBGbcbA" + "hGghgFhiFKuAAFhmgliHbgmLfkhiCDjoazejBCjIABafghgl9ezAdj8eJK8dcd38kJxycBJK" + "KL5aAB9ytyxC0501oN05uAxy8dMN12BCLMjo233423_" + "025678abcefhijklmopsuvwACDEFGHIJKLN", + "qwABBCwCvwuAuz6bbg2327bc7c67ciab78pq67af6cbcab5acdFGhichfkflijBGGLnMdChi" + "wCwxxDDICH16bAwCBCBGFGgFghbhbAqrrsaf6bgmlmfl5alK8x78afnohi399eeDyDsykJfk" + "23CDafabagCH6b3416mnno9yty01rw2r12010ppqgmlmHNoNioGHkJEF23FGGMGH12gFpu01" + "EFzE0ppu0112", + "CHBHxCvAABhm27bhxyHN2316zFHICI6bpvEJxC3s399yhGhnbcchabafyDgldC12ej5aKLlm" + "jI7c23GLazGHkliHhGvw34gldi8echbcbg16vA6v6bqrty1qdediuvEF01GM12xyFGEFfEfg" + "io27CHxCCH38ci67xytyzA78bczEEJzEglzA2rbAbc7c78_" + "12345679abcdefhijklmnorstvwxyzABCFGHIJKN", + "16INuvjIHI7cqrAGuz5uejFGFK05wxsx5avA27agafijpqGMfkAFqwuvBChiciwChm23kJfk" + "ty56abvw6vpqvw39cdGHcBch8x1qwxvw9emLlK128dbc16hmvAxyEFAFdiin236bFKdi12qv" + "afqrbgwx5a38cdglrschstrsqrdeqvvwwx5601bg12uv162334231201cd_" + "0235679aefghjlpqstvwxzCEFGJKLN", + "vA6c7cqvAF5axCvAHMrw06sxstrsqrCHxCpvpqvBpupqrsqrpqpv6v161223344t786bbggm" + "06010pBGcddCBCBGoN7whifgchbcbg7cdeglEF27233ssxxy9y89vw6bLMGLhGhiiHHIINMN" + "hnDI67FGGHiovw7w676bbhchciiooNHNGHFGAFzAuzjIcdEJJKFKhnyDzEDIazabbckJEJzE" + "cdzAdCxC8xABBCABdezAzEEJ", + "gmlmGHbc67xCbgHNHIBC277c78rsfkqv38AFbA06GL6b3416klgFBGCDDIqr3sdjvAeDglbg" + "azabglsxstGLbhchGH4tEFFGcipuuz5bpqKLbcbAmLEFJKazCD6bwC67hmCDqvbhuvlKyDaf" + "jIrs9yxCgl2rCHhnHMvwxC5b78wx5a0605afrsghch7c7889syvw7cch_" + "012345789abceghijklmprstvxyzABCDEFGIJLMN", + "AFAB6bwCwx8x8dcdcBdjzACDDIFLKLFGAGEFBHHMGM5avAFGfgagAFbgbhhnmnfkdCzEazaf" + "glgllKFKAFvAvw7w17166b7dEJoN066b67uvuzINeDzE2723vwnocisxbhhn6ctyCHxCsx7c" + "7wvw6vCH4t89pvxDvwwxxDci78vwpvpuqvzA89vADIabqvxywx676b16qrst677wwxsxrsqr" + "1qbAzAazstxCINoNioiHCHxCxyyD", + "vAvB16xCCHBCdeejbgdiingl6vbAbggF7d78899eABFKdilKzAxyvBpvpqqrrw7w788ddCrs" + "56BGvBvw67fEwxio56FL7wstchaggm7cbc27bgag23af6b7cchci277cBHBGhGchci5u5aHN" + "GM01055upupqfkqruvafhGaz12rszAuzuvqvqrrs3s2312166buvAFuzEJzEuzEJuvDICDwC" + "vwvAAFFLGLGHqvjI49hGwx9yxywx", + "wBzAbc6bqwqrBGwBchhicd89mncBABbA1601wx12BHwBwC0pbgagafiHde8d9ysx5u01CDxC" + "05fk5a01rsrx28nojojIINMN6v2rchABstafzAvA6v565azE9eejjICIdCuv123ssx23AG12" + "0156ch7c675605011qqwvwuvuzzEEFgFgmHMBCCHHMGMAG9ehnBGINLMDIyDJK4txCsxtyyD" + "lmDIINMNLMmnlmlKKLGLBGcBbcbhcd4tJKno", + "pvghABwBvwuv782rfgBGfkchciwBBChncBbcdCrwKLwCoN16sx676vafGLrsCDgFbgFGqr7c" + "chhmJK0ppqFKstpu385aioIN8e78qwHMGMBCxDzAAGoN34BGBChiyDCDmLDIBC38ABABBCwC" + "7w676bEFfEafabbgejyDghEKGLfE9eijsxfg39wxvwuv3svwghwx39787cch7cej9e78_" + "045678abcdefghijklmnopqrstuvwyABCDEFGHJLMN", + "vwvAAGGHCH6b89hmgmbg1606pv12vA7838237cAFchciiHHICIwC7w6cGL34dCBCcB560pfE" + "lKKLmL5a8e01783ssy9y89wxCDhmno6c05ijchhipuvAzAuzpu8dhGzE5aiHchhidiqvMNHN" + "nMmnFGafEJlKFKFGAGzAzEfElmfkaz5a01057cinijejde8d38231201055aaffkkllm277c" 
+ "6c6vvwwxrxCHrsazxCCD12qrrsstrsGHrs3s23121q", + "BGGMHM56wx67pvuvvwwBxDuzzE783839vAqv8diHdideeDDIFKcBvB6v67566bab277w2rch" + "hninrs5uuzaz16HNAFqrstrs05ijBHnMJKuvFKwxqvqwuvwxuzcd271qvBHNmLfEAFEFzEuz" + "6b6vvBABEJfEfkbgbcbh49glBCmnyDCDafdCBCcBionomnhmqr89di1q1601ab6b7cghbg7c" + "788ddihiAGstabij4tstsy9yvA672712jI6vvAAGGLFL", + "7d78qvdCCHmnqrrsuvuAlmnomneDjI898eBG9yhi12HMFGwCFK0616qvqwwCBCAFinCDCHGH" + "HMFGGLvAEF12gmhGFGAF3smLbA0p6b0167af7dstsx56kJfkdiin16675aij5u78nMchhiaf" + "12GM89qrfkqvij1qqr2r8ddCxCJKBCKL17fgFK01jo17ejIN9e7cBG3svAAF49LMvAMN9eLM" + "GLBCCI_02356789bdefhklmnopqrstuvwzACDEFGHIJKLMN", + "5aabqrBC56qvbhbcin67787cciijej7wwx8xCI1216890123nMjIABuzzAsxABpvBCABvBfg" + "7cpvmL7wpuiH5u34inCDEFuzFLhGchEJ7cghchhm5bzEcd17xC8ddiinuz7dbc176bEJfEfg" + "qrsxLMrsqr5ustglchxydj38bgpqglbg6bio27hG567c278x67wxCH78lKHNCH67wBBGrswB" + "wxqrpqqrrsdixyst_012345689abcdefghijlnopqrstuvwxyzCEGIJLM", + "AB1238232767fgKLzA7ddC8edeAGGHBH34EF677cbc3823gmdjuAvAqvpqcibg3s23HIjIjo" + "ioFGwBBHGHEJMNaghnhGLMFLmLgmgllK4tEFAFej06vAklvwvBwx565uuvbc9ylmbgag6c6b" + "vw166vmncd1223399eejdj8d8xwxqwoNjoej3423565afkaf5a560601122334499eejjono" + "mnlmoN_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + "AB5aazab7cafbcvAghbghiFGio8d787ccigm17AFqrgF89uAfk787cpukJvwejdjgl8x0pin" + "diiH9y899e8d05ghGHfgfkAGAF344989ghchvAEFcdAFCI8dGM23HN271701hG38dCBH5aFK" + "chABzAazzA2rBHfgfkqv6v124tbgHNuA3456165623di122301056b018d16126b3sbgdi5a" + "23gm3412affkaf5a_01345789abcdefghijkmnoprtuvwxyzADFHIJLMN", + "27chbhFG5aaf7ccBlmCIklfl78wxxCvwGLhi8934bA8x177dzAzEEF010p565ubhchdedCCD" + "2rhmhidicd12BGazBCxCwxqw16565bxDcBbAwB23BG12GMrwzAINyDvADIfEjI238dsx1qqv" + "vAzE3834xCzAvArwvwvAAGGHCHxCsxFKyDFGzE38wx8dxyiodiFKJKEJ7dioBGqrwxstrswB" + "BGqr1q17GLoN7wKL_012456789abcefhiklmopqsuvwxyzABCDEFGHIJKLM", + "6chicicB3416vBBGCHHIbggFAF9y27hm6b67bh78565a6738sx78FK7c8ehnbhFLeDwBwCxC" + "rxvBuvpupqqwzEBHvw5b6b06ABghbgCHBC2767EJiHcdbc788xwxzAcdglbgwCgF6bdjbAuA" + "ejfECI67cd8e5utywCafpu78bcxycd67iouz5ahiwxzEagijfkghuzvwEJ89hixy78fg67pu" + "56785a67gh786789iofEaf_012345689abcdeghijlnopqrstuvwyABDEFGHIJL", + "rwjIpuhmuzGMqvpqpuchABvAhGmLgl5bbAxC8x28277dcdcBwBiHklBGHIzEbhbgrsde23uz" + "hnbcqrFKCIiocdEKbcbhCDEJrshmzEuvqvch129eyDlm8e28277cchhGBGBCCD6v6buzBCAF" + "hiGHfg06pqvw6vvBvArwtyAFEFghBChipuioEJrsstEFrsFGuzazjoCDBCGHzAklABHNBCmn" + "jIrwvwzAnoCImnCDeD9e_01256789acdghjklmnopqrstuvwxyzABCDGHIJKLMN", + "pq06ijbhhici6cGMvAAFqrrssxABAG788xwxvwqvqr2rhGuvBCincicdvAiopuhnchci6vGL" + "jIoNghgFhG5a8ezAABuzBHxDafiohi8x8ddjwx8xstfEij9yxy6739iors2rwxklBGbcwBmn" + "565aab8d67qvEK23glbg56nowx38zEgl78pu89490p67mnJKBGFK56vAxyAFuz6b168dFK5u" + "AB6bbhvA055bbccBuz898dcdcB89_0246789abcdefghijkmnopqrsvwxyzABCEFGIJKLMN", + "zEaf7d1617786bfEBGBH67bguAwB6v6bMNDIGM2767FGhGGLKLlKglbghnchciBGxC8esxBC" + "xCtystsx0pBG8x5umn783sqwnomn5aBCJK6bgm06676bzAdjABGLwBBCrwqw6cpqpulK5u5a" + "agbgbAzAsxrsqrpq0p066ccddCxCrx8x899yyD34djBG78glcB23cd7c2723344ttyyDeDde" + "cBBGbgGL676b6778bggl_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + "5b6bwCbg7ccd05zAzFCH0p27bc7c67glqwbgci16566bab34mLgFwCbcGLrw1623pqvBBGoN" + "wB9yGMcBwxFKHNjIBHvBCDnosxHIuzxDjo8est78mn3sgm12gFhizFiHuzABBCBGAB5uGLzE" + "ej050p01hivA05ghfgpqqvaf9e5a499evAhieD0501ijzAzE12fEafhigh23CDABEJzEBC12" + "01ABzAzE055a050112vAEJvwwx2rvwvAxy_" + "0124567abcdefghilmnoprstuvwxyzABCDEFHIJKLM", + "vB5u566cqvaziH9y5ahizAghty0605afCDijwBABBCFGeDCDdC8dAF8x1q6cabagchhigFAB" + "FK7cwB899e27zEbcvB237c3s49GHBG78fkvBghejjofgbgJKzAiobcwCoNhmlmglqw277cCI" + 
"ch7ccddCwC38GMHI78GLbAAF67KLpubA27uAinpupqdi899yBGrwwBrwmn6b8dBG38dihmmL" + "ioAFchFKFGci16EFEJEFAF1216mnno6cmn_" + "012356789acdefghijklmnopqrsvwyzABCDFHIJKLN", + "016bBGcdvAGMbg17560501BCjIejeDCD6bbc12abdiflgl7dijdj232778bhbAAGBGcB7cBC" + "wx8x78nMCDxCxy2r898x16af5acBfkghzAAFINCH12hi3s6bHMioqw23af5awBgh78gm0pLM" + "DIyDstrsqwqrrssttyyDDIINMNLMGLBGvwwxvwvA565aazzAbA67dizE78kJfkag1627az7c" + "565aEKaffkch56065a01055aaggmhmch6c8d388ddici7c2734zE122312uz166b34bgpuuz" + "zEEKlKglbg6b1601", + "vwwx8d1606BHvwwBABAFcBABuAuvpv0p055b1quzingmabagdirsrw7cAGLM127c7wwCBCvB" + "vAbACD1q2823uv9eqvqwiooNjoejdedCCIFKAG3427mnnonMAFFLLMGMvA016cci399ysysx" + "inwCvw6vuv5u5aabbc01066727azzEfExCwxfkfljI7dabEJazab565afEaf5atyyD56sxDI" + "67898d78di89dj7dDIyDtyst3s23277ddjfEinEJ_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + "6bbgwBBHglrwqwpq1qBCwCvwBG676bbcbgch27rsrwCIHNHM67wBLM78iHHMnMxytystrsrw" + "6bdihm67wBwx8x3823277cABzAwBABzEzAAFlKstmLflazlmEJafmn56iono34ABoNkl7wlm" + "hmchcB6cGMcdcikl8ede786cdCrwwBABuA5u56677838344tst8dABCHCD89BC78ABzAAB67" + "565aaf5a566778899yyDCDBCABAGFGEFEJkJijuzuAGHiHijHMGH_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + }; + + solutions["Cuboid grid 4x4x3"] = { + "14g:25h:36i:7j:58k:69l:7am:bn:9co:adp:beq:fr:ds:et:fu:v:hkw:ilx:jmy:nz:" + "loA:mpB:nqC:rD:psE:qtF:ruG:vH:tI:uJ:vK:L:xA:yB:zC:D:BE:CF:DG:H:FI:GJ:HK:" + "L:J:K:L", + "9aaqwx899a59hx7bEIAE37wx48rv23xBkAgkzDDH676a9a1289HL26hivLuKab1hhx9a9pKL" + "2iiy2i_3789abhquvwzBI", + "04jn019apqmnlmxybfyCijiy7buv26CGghGKBCtustCDlphiDH56EFrHosptFG89uK59GK8o" + "ij2i9dhi_2469afgjnpvxBE", + "kAAEBFxyae4k9ddexByzDHpFefBFzDabHL9a59AB5lxywxFJmqde2337tJ7ndtnDimwAnr15" + "12hi1h23_29abdlqxyzABDE", + "56ijBChi4567hllp3j481237hl049d01koCDim9pmqhiijkl8cptqu7b0g6muKgwcspq6748" + "26qr2inriy2i_023679bhijoptB", + "opIJ12EItJKL4559AEBCIJGKKL26CGxywxqG9pzDko9aaejzwA6a153jEFaqJKFJAEvLpFqG" + "fvosmC6mgwoE8oKL0gvL_124koyABDEGIJL", + "qrmqlmFGEFGKklhlhiGHlmrHmnAEgkyzbr67560gzDpq59xykAJKDHlmop4kqr8oHLpqopkl" + "uK9dxBDHnrgkBFFJgwtJ_7abhkmnqryzGJK", + "yCCGaeCDmC56mqlpkllmrH2337qGzDrv9d2312os6aaqpq264kquABqGGHabDH5l6a1h56ko" + "9ppFBFghgwhxFJptFGaegkghCG0gzDxBGKab12EFBCCGEI2312_" + "1267bdefghklmprsxyzADFGH", + "FJhl5l67BF6a6m45nrxBjnxy04ae7b48IJbf7bABJK1537rvlB6a9anrquuK9duvnDEFwxtu" + "rHeu0412lpbfFGhxcdwABFHLkA9p671hdehl6aef12aq9d26lB6awx_" + "0123567acfhjlmnqrvwyAEIJ", + "qGghdenr4kKL7b6abr12xBwA01eu150gmqaqabrHAB23FGgwimqu37qrhlbf7buKEF12pqmn" + "yClmghnrcd15lBAB599pBC8c15rvEIpFtJmndtlpptvL483jlB89lpopos_" + "013467efghijnqruwyABGHJK", + "26stpqfv8cBCimABijkoqulmosjz9a122337EIimEF7nIJtJ37CD9puKBCbrmq01tuhl7b15" + "AECDptqGDHstlB9dqrklwxFGpqlmopsIHLos4kopklGHhlFGhx1h155915_" + "012abchijkmnopqrstuvwyACDEHI", + "uvhlIJhiCG9aijGKtJae56efmnqrdt2iptyzlppqghbrqGhixywAxBFJBF7baqxBEIGK01op" + "1559xyoE45GH8o6mFG9pBFjzFG3jaeijqGaqhiijwxptaedegh04hltJABJKKLJK_" + "035adefghjlmqrtvzADGHIJK", + "eu9afvnDCDqrFG23rHrvxyquqG8oBCEFFJkltJ59CD5lEIBFuvFJmn6maedtgk8cmC56484k" + "kA6ahlab2637qr89bfEFlB6mABAE7b9abr595lkl15BCeunDCDrvoEvLBClp37ab7n_" + "23468acefgknqtuvyABDEGHJ", + "hiBF1hGH23quijtuoppFFGqGIJCGdtqr2ilmyC9ppttJFJlB9p595lABmqAEsI48lmrvlBhx" + "hiqr4kqumCmqtuCDim9dhxaqefwA8c2imqeuAEdtbfefCGgw48EFFG9d4kghhiEFoEkAgh4k" + "8o_13589cdehiloqrtuwxyABDEFGHIJ", + "23yCabAEbfmqop12pqpt01st9a599d48aqoEGH9p898otupFkode04FGcdptKL6mfv9p4kEI" + "EFCGABhlyzFG89kA04pFjzsImnefopGHdeyCyzBClpFG48AE89GKKLpFlmFJkA3jhleughgw" + "FGAEuKBFlB_345abcdhmnoqrtuxyACEFHIL", + "CGkA9d9a4k6aEI8o6m8cptEF2612BFhxqrpFBCqu89nrqr9a9ppqqG6a044kiy2i48ptyC04" + 
"6789AB9ajnpq7n9dstJK56dttJzDBF9dkoyzkAjzgkDHbrko67op5lqrptsI9p56xBlBABwA" + "AB7bpqpt_026789abcdfjmnqrtuxyzABCFGIK", + "stdtlpklkopthi4889GHqGtJijnr2iaqjnbfpttuqusIqGlpyCKLuvst6701wx568o5lxypt" + "nrcdJK12EIhl23IJAEde7bcdmnJKhx12mC6mimkl9a37wAab6aef3jjzaelm15459a3jkl56" + "mC9p376m_024578acdfghjklmoprstuwyHIJL", + "ptghKLhlyCmCzDlmmqlpDHyzlmiy48uvbrgwklHLjzwABF56CG6mFJIJDHJKtJ3jquuKrH45" + "gklmzD04lBCDpqopuv264kfvnDpqBCBF8cstcs48EF155l4kgkmqgwBFlBwx4812ijiylm2i" + "xyBCijBF6m12wx_13689bcghjklmnptvwyzCDEFIJKL", + "xypqGKlmBCKLptCD48kl01qrlpnDst6759044kEIlBhlhx89CGFGefnr5lABpt9a59JKyCko" + "CGgkiyoEyCqGjz89GKde15efaqzDkoEI9dpF59fv4k453j23oE56599ppFvL9p4k59ae15fv" + "1h011223017n_0146789acdfghiklmnqrtuvxyzBCDFIKL", + "mqabij01br12stfvpq26KLJKgkop6alplm5l04os0gCD48GKefdeae9apt9pxywxcdqGtJko" + "6alp1himhxCGIJuKuv9agkmqquxBij23lmlppq6mEFmC9d2iopdt8cfvnDwA48pFFJ596m56" + "8c599pAEpFDHpqHLqrbrqrpq_012359acdfjklmnoqstuvyDEGIJL", + "yzjzFGBFim8o678c89mqquqrGKJKtJptpq566a9aefCGwxCDyCaqde7ndt5l48nDeu37klGH" + "gk047byzxyHLlBko7nopqGbfmn89GHxBmCkA9p6mbruKpqko15opyCGK4kCGGKquptimiy7b" + "yzuvko37qumqqu7bopoE_2356789acdfgijlopqrstuvwyzBCFGHJK", + "04oE0112237nIJ7bmnaehi0gimaqgkiyghrvqGhivL3jopospqsI266a67qustosjzrv1h8c" + "gkqr6mkolmhllpeuptABfvJKgkpqFJAExBwxgwJKoslpuKij89hioEEFhllpuvghqu8olB9p" + "8c5l9dnreujnaemqimlBxBnrmqpqBF_012347acefghilmnopqrtuwxyzBEGHIKL", + "9axybrgwghhxhlhiimlpquEFefmqijwAAEhl1hde56BCwACDABIJFGEIBChiCG89fvoEKL48" + "tu5lyCGKquEFzD04FGhl26koGHvLst67xBnD9plp8cDHlmrvzDpt595llpnDefoE9d56hl59" + "nrhilBae8o8c151h15xBBFxB56rvuvBF_05789abcdefghijlmnopqrsuwxyzCDEFGHIJKL", + "FGKLbfae5659pt678c9dEF56qr0145brbfAElmmn7n676a9a59klhxwxwAkAgk12mq04csmC" + "lmsInDoEhlGHaqstos8o8cjnoEuKFJpq1hEFAE23kADH48gkkomqghHLhigkGKdttJhx0ggw" + "qu26dteuim23qu7nDHnD6m01pq7n9p_0123456789cdefghijklmnpqrstuwxzACDEFGHIL", + "opwA8c488o159d599p56ptuv45qG4kqugwfv01lp266asIFGwx48mq56hiuv6mmnghqGxyJK" + "3jcdwx26oEopnDdtuK12kAqukoGK01jnjzosIJmqimwAEItuAE37mCIJabCGwAptGKKL1h26" + "JK6adtuvvLFJpF26FJuvbrIJquae3j7b23ab9a899a_" + "0123468acdfgijmnopqstuvwyzACEGHJL", + "lBFG89qrAEuvko9dae9p9aabbffvrvrHGHCGyCiyimmqhiopoEEFJKhx56GK67044556262i" + "high01047bCG488ocskAwAgwAE3759stsIIJJKGKqGpq7nrHbrrHEF4ktuuKKLHLGHqGpqDH" + "aqpq9pzD6aptGH9p26FGeuEFoE45uKijnDmqmn67nDjnmq677njnijhigh0g0445aqim59qu" + "mqimhi", + "ghgkABkAkl45lmcdij89mq04lB59dequ48koimosmqeffv4khigh0g2i9dhimC6mvL26sI6a" + "67bf3756gw3j67iyyzjz59eu7nGK2iuK37hxcs6aaeeufvaeCDyCCGGKuKtu6aghCDbfqudt" + "7b37mqosiy7boEos268c2i0104immq48040148qu_" + "123456789abcdefghijklmpqstuwyzABDFHIKL", + "wA9pklkAABFGCGBCAEzD8clBwxmq56rv155llpptGKxytJrHDHnD01de8ooEyzbrCDtuefBF" + "strHlp7n6asIdeeuuKcdxBmnBClmABgwrvlp1237GH8chxxB26xy0gBFuvpFiy2iCDptgwcs" + "tuab7bmnbr9adeeu487n0guvdevL9drH9a6aab8c480448_" + "0136789abcdfklmnpstvwxzABCDEFGHIJ", + "lmyz56zDwxABghxyyC67CG45hi5623gwijklgkgh48lppqqrnrjnjzyzyCBCop12ptpq156m" + "3jhx04596arHIJJKKLDHtJstjzqG9d599ddeaeaqqrnrmn6m265lrvcshlrHBF45CDmC6m56" + "454kkllppqqGGHbfmqHLfvxBosDHHLCGuKzDquDHuKopvLKLpqJKmqEFEIIJJKKLHLDHzDyz" + "xyxBiyyCCG", + "564567KL485915bf9duv9plm26hllBlpopoEEFrvijgh4kJKCGwxAEqrEIcsyzyC04GKIJmn" + "mCCDde7nCGpqJKiyhilmaqDHKLimGKefmCsIwAuKAE453jijCDeu6mgwdeae9aEI0g7bAB37" + "uK7bimxB1223121hghhxxBBCij59CDnD45ABmqpqpt_" + "013456789bcdfghijlmnopqrvwxyzABCDEFGHIJL", + "56ghop45abCD9a6a56488c12xBBCyC010448gkJKFJyzhiGHxywxwABFEFoE8ooscs9pFGmq" + "delmmn4krHDHghgkklHLhxsI26121hghgkkooppttuuKGKCGmCaezDkAcs6a4kDHzDlp67yz" + "56uvdtuKeueftJ5989xywxwAAEoE8o899ddttJJKGKCGmC6m677njnjzrH5926brhiuKsIae" + 
"6a26ae233jijeuhihx", + "oEFGuvEFlmcdyzmn6a3j56klEImquKuvquqrbrimcsGHrHJK5lFGhiuK8c59ijpqgwfv9a15" + "48mq455604kAosim67xypt2iCDkozD89lB569d9a59abos8o5lnDuvghhi233jgkdtijkAbr" + "AEhi1htuuvwx12oErHptlBwA8oBC4kkA4kdtbr48abBFEFEIEFxyyzpF_" + "0234679acdfghijlmnorsuvwyzABDEFIK", + "wxCDabtuBC566723stptop01266aCGyCxyxBABAEEFijhi2i45pqqrmqaq9d59yzlm6m56cs" + "hluKuvrvnr7n3723121hghgkkoossIIJqGEI4kaetJwAgwgkbf7b676a236a37BCqu9pbfpt" + "oppqqrGHFGoEop9p89CDBClBklkooEEFFGGHvLqurvvLHLhlnr7n372326fvlppthlhxxyiy" + "2i266aaededtptwxvLfv", + "immnjn01BCKLGH23EFABef67qrhi045612FGxyghxBim6mlpBFpq01bfCGEFGKhiqG59CGae" + "EI4k48CD9p9d0gos6aFJnD4kae7nlB6agkghhlHLkolmlBBFpFpqFJ37oEbr3j1hijiyopqu" + "8c8ojnzD56hx4kDHimrHmq7b67uvzDquqrpqnrjnmqvLrvxy6mim2iij_" + "0123456789efghijkmnoprtwxyzABCDEFHIJKL", + "ABEFoEBFGHxyBClBJKpFyC9p8otuptpqopmqpqlpbfqrjn3j37mCCDnDkoKL45gk7b6mij3j" + "jzosEFEIFGHLkooEEFBFlBFJwABFptlBjn4k2iuKiy04kAyzdtmqBChx7nAE45678c01xywA" + "2iEIaetu4k1256pt48yz23121hhlaqjz04DH48lpjn8cnDDH67GH7bbf7b_" + "0235789cefgjklmnoqrstuwxyzABCDEFGHIJKL", + "45IJCD48kllmko12BC8cmnAB89kllmhxghgkkAAEoEoppFFGCGBCqrklhl1h0104nrGKBFde" + "CDCGGHJKFJFGqGqukoxyyznDwxKLwArvvLHL26kAFJ6adtxB37kolBabptwxlpimmqqrimos" + "pqqrbrophlaq8clpEIbfpFpttJcs7boE8c8ooEEIsIyC673723ijiy15yC59bffvij3j23CG" + "GKBCqGaqde9d59566a15010gdt", + "ijimmq9aAEBCCGEIyzptCDjnjzdeos89dtlppttuquqrnrmnhlJK23tJef3j12482ixy6mmq" + "KLim7nhi01vL5l04cdmn1hlmFGrHwAwxfvab9a0ggw01koosstdtdeeuquqrnr7n37231201" + "0g89klmqlmiycdimjn5lablBmqmCCG9pGKFGBFpF9p8948455lrHHLvL6mnrGHoEFGhx8oBF" + "FGGHrHnrjnijiyxyoshxkooEAE", + "2389CDmn9a9ddebrDHIJjnmqKLimpqqrGKAEpqABopEFxypFlBoE1215mC6mwxfvpq45xBmn" + "quFG5luvHLtustvL48eflm9pnDuK7ngh9ddeef56fvjn04vLHL4k0g45GHtu59233jFG9dlB" + "gwkocs23st5lsIossIlBgkmqjzBCBFcsij59ko9d5956BC6m0gimijmqpqoppq_" + "01235689abcdefgijlmopqrstuvwxzACDEFHIJKL", + "de4kxyklwx3j67BF898cyzIJFG6awApFFJtJ9a9ppqJKGHhighabyzjzijgkkobreuFGrH26" + "378orvkA59156aCG7bkostosoppFFJxyCD0gimoEABtufvBCjn6mnrnDCDmCuv6mtuAByCop" + "26vLptdt59CG9dpFyCEIxygk6756hxAE67ghEIgwwAaehighgwijEFAEfvEF37_" + "12346789abcdefghijklmopqrstwxyzABDFGHIJK", + "xymnlm67BClpopkoABhl56pq676aab5901121523124k7nmnmCBCxBhxhi2i266aaqqrbrpt" + "BFst6mlphlptpqwAmqpFyzCGyCiyimmqkl9pdtlBBCABAECD1hGHFGCGBCgh89pF9p89hiJK" + "GK48GHrHnr3jjnpq37quuKeutJKLuKcs7bstcs12tu8cuvfvuvstosuvrvnrjnijhihlklko" + "ossttJJKEIIJJKKLvLfvbf7b3723121hghgkkA04", + "pt45EFEIIJFGlmtJhiGHqrlposmCBCBFEFoEoppq7n67CGquijlm59gkJK9apt9p9daeFJ15" + "HL3jAEimEIAE9pmqijmC37lmhl9p9aaqaepF04899p8o1hmqkoGHGHDHnDmnmqpqpFEFaboE" + "gk3j8c8ofvqruvoEtuEFBFxyyzxywxxBBFFJgwJKIJIJsIsttuuvvLKLabbffvrvqr7b_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + "15JK56GK6745aqmq6myzaeimGHlmabdt48klxyqG599dpqmCBC6m6aaqCDiy2ijnnrptqubr" + "ABBCmC6mjn7n3712pq9auv23dtnDabjzxBwx127n7bop3jxyIJimBFwxJKEImntJKLkoyzlm" + "jz9ast04ptAEpFcstu8cptuvlphllpptstcs37ef45dekA8c8959cd7b8cef9pbf8o7boEpF" + "8o8c9pcd_01345678abcdefijmnopqrtvwxyzABCDEGIJKL", + "AEABstFG12BCGHhlEIwx1559CDwAgkAE5l45488oopmqpqqutusIim04hxoEklmqhlghgwwA" + "AEEFBFjnFJFGuKrH6mcdBCmnnDlplBBFcs9ddtptmq6m56599aae1hqu6aos01235lhx26hl" + "iy6m7bHLDHCDimiyyCaqaqaeeuhxjzmquvqumqxyhx2612010gghhllmkl23HL4k373j377b" + "_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + "GKJKlp67xBxyyC56CDlmmnimhl9d155l599ppqmq6aDHlBkooplpzDaqwAwxhxghgkos7bCG" + "56266m677nAEptlpklko8o8ccstuqupqoposst4523qriy2iiyyCmCGKuKqumqlmhlghgkko" + "oEEIIJgwHLlmGHwADHqGrvvLHL8opqnrmn0gnDbrbffvoEgw7b3jlm8o5lvLwA377bjz3jjz" + "_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + 
"osAE1567EFkllmFGcdkowAqr3756ptmnoElp67ABkA6a9aimkoabBCBF9dCDyCiyhighgkkl" + "lBhlpFtJijdt5l59676aaeefbflBkl12018c3jnr0gmCop6mim2i1215csbr6aaegkklpqEI" + "GHCGHLjzEFqurHij56jzmquK9d455659GKqG1hhx1hqrnrmn15FG59379d6ade6mlmeufv7b" + "bfim6m6a7b37_0135679abcdefghijklmnopqrstuwyzACDEFGJKL", + "kl677bablppqghgkkoiy569dhi592337678c12mC9pqrquuvkABF01lB6mhlaemC2ipq3j04" + "48yC154k8c89wA6mJKDHcsbrrHAEHLuK9a9ppFEIFGlp7nwx23imEF5lFG15rHfvpt12pqqG" + "aqnrmqrv01quCGrHmqtJimGKwAyC23xy1hhxzD45dtwxyz561hBCCD45sItJcsij7nsIxyyz" + "hxBC1hhxkA3jwxijwA_01234567abcdeghijklopqruvwyACDEFGHIJKL", + "6agkkoGHhiAB45AEEIxyhlghgkwACD6756595llBABAEoE8olphllmmnjnijpt8cyC9dCGGK" + "dtaelB9a6aaq2iuvhi2i233jHL7bbrnrtJDH12sI48vLHLGHqGqumq8915454889ghgkjz9p" + "04uKkAcs7nlm2imnjnijhihlmClm454kkllm6mmC597nEIptnDeuAEbf7bEI679p26677njn" + "ijnD15uKbf_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + "9aBCabbf0104klIJFG89fvBFrHqGkA12EF9dHLptGHlpjnklko8o484556iy4k455lhlgh67" + "GK56CGCD01wx04BCde8czD48tJpq2iosGK45hl1h15qrpqbrFG0gij9dEFuKeuEIoplBhiij" + "1hlposnrjzcs23BC3jxBBFFJBF59CDjzxBhxqrqu1h8cmn1559mCqrmn9dsI011201cs040g" + "dtptBCstABcswAstABBC_012345679abcdefghijklmnoqrstvwABCDEFGIJK", + "23pqklkoop45hlhiwxpqpttuxBijkl5l5615126aab7bqr9aBFpqxyhxhirvkoop9p8948wA" + "lByzEIqrkAkooEhllpwxwAosstptjzlBmnqGbrab9a9ppqmqmn15oE6m9d3jrHjnaeeffv8c" + "890gnDnr59vLrv8o9dqudteuFJqupqqrnrmnimhighgkkoosAEpFBF6m15hxxy3jxBBFFJIJ" + "EIAE0gkAnDkooscs_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL", + }; + + solutions["Path, N=5"] = { + "1:2:3:4", + "233412", + "1234_24", + "12230134", + "34011201", + "342312_24", + "011234_014", + "012312_023", + "0123342312", + "012334_024", + "122334_014", + "011223_1234", + "23122334_13", + "12233401_013", + "123423123401", + "23120134_134", + "23123423_012", + "233401231223", + "340123122301", + "341201231234", + "341223011201", + "3412012312_24", + "01120123_01234", + "0112233423_013", + "12012312342301", + "12231201_01234", + "2312013423_034", + "0123123423_0123", + "122301341223_01", + "2312011223_1234", + "233412012312_34", + "2312011234231201", + "012312342312_0124", + "013412230134_0134", + "230112342334_0234", + "12011234231201_0123", + "12342301122301_0124", + "1223011234233412_0134", + "120134122334231201_0124", + }; + + solutions["Path, N=10"] = { + "1:2:3:4:5:6:7:8:9", + "4567788934122334_156", + "126756236778673445_156", + "233445566778895601_126", + "675678674534231201_567", + "5667012312780189_035678", + "23344512560112233445_023", + "1234453423017834455645_012357", + "6745897834566778894534_356789", + "014567783456678912782367895645", + "125634452378346756452334_135689", + "1256674523342345122356_01345679", + "122345011267347856674556_0123456", + "124534677845566701234534_01345678", + "78341201452312342345566756_12345679", + "4567013445567867564534231201_01234567", + "23345678671223341278564501348912231256", + "4534566723563445566712342312011234_34567", + "6789345645785667342312568945342301_35679", + "7889126756237834120123675612234534455667", + "01671245347801897845566756452334_01234678", + "34786701234556341223451289677867_02346789", + "5601786778891245233456452334561267_024568", + "452301127867892356014567786756344589_03489", + "34128923455634786723127856014512342334451201", + "89017812453423455634675645563423120123_03579", + "1234566778450123341201566745892356344578348923", + "1256786778012312564534894523785667561245017834", + "233445561223340145566756122334455667786756_012", + "2389122312785667564578344523561223016756_13569", + "5667781256011234453423893467455645342367120178", 
+ "782312450134238956126745566778346745566745788934", + "566745342301128978566745564534452312011223_056789", + "3412675645782389675667783445678956672312342334_1456789", + "783456896745231256677867342334014556671278894523342345", + "2301677845563412234501566756123445562367347889_02345678", + "34566745123423344501561245342334455667786756458934_01345", + "456723566734011289457867566745342312011278233445_03456789", + "457889345645675634234534451223346745786756453423_12345678", + "785667785645893412011278236778348956455634231201_01345789", + "6734453423561234457867013456786723128901786723563445342356", + "78125634231267455645897801673423564534894523125623_1345689", + "2312568934011245562367567834453412562312236778893478_0125679", + "45564567342378456756671245893478450167566723124523342378891201", + "01126734015667452334237812568967786789564534566723786756_0236789", + "56674578564589342301456756456778893467455645673423122378_12346789", + "671234453489452301785634671245563423346756784556893467786756_0135679", + "348978452334451256012367781245344556672389784534124501126723_01235679", + "45342356124556346778455623456734017812562345345645568967786756453423120" + "1", + }; + + solutions["Ring, N=5"] = { + "14:2:3:4", "042312", "120134", + "01120134", "0423_0123", "0104342312", + "012334_023", "123423_124", "3404011223", + "340423_014", "013404_0124", "042312_0124", + "231204_0123", "01042312_024", "041201_01234", + "12230112_013", "34011204_023", "34042334_023", + "34122334_234", "341234231201", "23040104_0234", + "34122312_0123", "04011201122334", "3412012334_234", + "3404233401_0124", "340412012334_024", "041201043423_0134", + "122334040112_0234", + }; + + solutions["Ring, N=10"] = { + "19:2:3:4:5:6:7:8:9", + "67122378_168", + "010934897845_123", + "340945564567_036", + "890178122312_029", + "457867340956_01568", + "89561267237867_159", + "56674512013445_24569", + "12458901564534_123458", + "45096756897867_023579", + "7889016778346756_0145679", + "1201235645126756_02345689", + "09122389347867010901_01245679", + "23897809897867560145_23456789", + "093401231289786778233423_02479", + "566712230134894512098934231234", + "780901456723563445238978_01258", + "8956670989455601123478_01234568", + "78015634458909672378673423564556", + "78670112897867892309568934_06789", + "786756344512233478560112_2345689", + "78895609674512235678346745567845", + "01094567342378128901120109_1356789", + "230109342312564523344523_0123456789", + "455634091223453467122389783467_01456", + "451234785601674556342345091256_123468", + "456701567867897845340989782334_034679", + "67013445120901091256236734_0123456789", + "781223896709125678348909010989_012678", + "01561267784523340109011223344556677889", + "097823896701122301560901120109_0135789", + "56236756013412018945782334562309675689", + "892309017809890934784534120145_0125678", + "092345895601781267788945342345_01345678", + "34098967455634011223340989788909_01234578", + "45122334120956786756457889015645_01234578", + "78341267237834890945786756677889_02345789", + "1201893423457812563409230112233445_0124569", + "234534785623891223013409120109897867564534", + "786756238909017834671223017812092356_345789", + "453456674523785612673456455623341267_01345678", + "897809458978235601450901344556675645_12345789", + "8956346778455609230167093489677889090112233445", + "09017856896709234534124578564501455667788909011223", + "011234235667786712095601122389455667786756_02345679", + "237812010956234589096734231223566734564534231201098978", + 
"12014556670923344556677889120123093412014523122334_23456789", + "0145788912095667788909011223344556672312097856344556677889090112", + }; + + solutions["Wheel, 5 spokes"] = { + "145:25:35:45:5", "04123523", + "010434_034", "1512233401", + "253415_135", "353423_134", + "4505122312", "053512_0235", + "120523_0235", "151201_1234", + "010545_12345", "01250405_012", + "15120134_123", "23122325_135", + "23123423_012", "15120445_1245", + "23452515_1234", "25451501_0125", + "35452505_1245", "452512_012345", + "05341204_01234", "05342301_01234", + "1523123423_134", "3401231204_023", + "34052505_01234", "45053525_01245", + "04351501_012345", "0523451534_1245", + "1235042301_0123", "12453423_012345", + "1535011201_0135", "23341223_012345", + "25053545_012345", "34040515_012345", + "34250504_012345", "3512013423_0134", + "013423120104_012345", "1223340401122334_02345", + }; + + solutions["Cylinder"] = { + "19a:2b:3c:4d:5e:6f:7g:8h:9i:j:bjk:cl:dm:en:fo:gp:hq:ir:js:t:ltu:mv:nw:" + "ox:py:qz:rA:sB:tC:D:vDE:wF:xG:yH:zI:AJ:BK:CL:DM:N:FNO:GP:HQ:IR:JS:KT:LU:" + "MV:NW:X:PX:Q:R:S:T:U:V:W:X", + "yICMuDIJOP56blwxpqsCxyajCDisWX45IS34yInoop8iuvuDtDjt3dISdnSTTUnxBCstcdrs" + "uDdnOXnoVWmnxHLVAB2cvwlmoy12eoBLisrBsCBLKL_68alnoqtuvwxyIMPSX", + "JKHIKLMWCMsCijhrhiisuvghoprB568i0967pqoy45vw34stLMyIwG23fgqAISMW2cyzGHwx" + "opBLHR7h9jfphipzijnomnzJnxtDOPisghPQsCpzdn3dlmjtfgefeoCM_" + "0568hjpqruvAGHJNOW", + "56KLxHcdyzopabVWxy78ENGHLMoyMN3dwxJK6gWXvw01opghABpqBCVWxyqruDUVCDtD12uv" + "KUzJjt23OXENCM34sCpzuEEO45dnhr5fisrssCfppzzJCMkuJTEFENklnxxHHRnx_" + "0358bdpxzADEHKLNVX", + "56WXbcOPrsCD67MN09LMjt789jnokulvlmBCuDuvxystwxdeKLvwklCD8iuvakuEklrsPQqr" + "efisijqAopfgghsChr0ahiktakkuJKAKGQktIJKU0arB01wGmwIScmBLwGDNENLVEFENFP_" + "059cdjlmnrtuyBCNOW", + "RSmnklSTlmoppq78ktkl9jstkthrisJTUV01gq1b674556BLnostLVghis45tDST343dLMwx" + "BCABuDzJmnCMvwuvopdnqAdepzefnxRS67vwgqQRPQxHuDzAOP5fDNMNAKMWktakKU6g0a_" + "0489hiklmnoxBCDJRU", + "34fgij9jBCMNjtLMCDQRghhiyztD562312ijEFABrBgh34fpGQkt1bajwGakpz01IJfgkuij" + "HIak12hr4eKLisxHeomw6grs23rBbl34oyFGcm2c89nx09gqqABLAKqApq01gqKU1bbccmmw" + "_024589afgjyABEJLNR", + "uvOP01ijeouENXEN23IJop099jghpq4534efhi67ijHIaj0aEOfgghhinoGHijhrop12FGhi" + "rBMNvwlvblLMqr56JKrsgq45sC1b4evFablvijdeqAAKKUajIJdnghuvuEpqvwfgHIwxGHFG" + "xyyz_1357aegijnovwEJKNP", + "STrsABcdop23OXRSJKeoOPpqfp34GQ12KLbcab67IJnomnakaj5f45zA23lm56uD45JKFGqr" + "yzpqopku34klEF452ceo5fuEHIcm45AKqA4emwGHfpLMwGuDCDGQkumwMNcmDNeoakuEEOtD" + "uEblku1bjt9j_1247dejpqsBDIJKQTX", + "kt78STpqstqr89rsISstpzCDyIbc6709oy56jt34OPgh5feo7867BCab56CD8978ABuDfp67" + "yzuvOXkl4exyeffgfpqAop5fde4ezAfpyzpzWXwxVW5fktxyghst6gpqhr8irBhroyBLsCxH" + "isBCsCCMGH8iMW_03589chiklpqrzBDPT", + "devwrsqr45hirsabpq565fSTKL6734uvefuDdnMNTUajHR56stBLIJnx78ijxH3dRShitDrB" + "LMhrdn45sCstSTuDpzzJdeQRJT8iyIuvvFKLnx4e67CMMWxHHRISJKFPHIxHnxdnqrqA78AK" + "eocd2cKUajoy89mnmwnodnak3d_4567befhqstwJKNRTU", + "122345pqeffg67QRef567h34MN09yzPQCDVWUVENhr67zAqAGHBCabrBOPktqrBLghklMNuE" + "lm01mnENstktdeKUrswGMWuvbc12gqlvEO7hcdhruEblkl1b7h6g23cmisku3ddnrBbl4eak" + "eomwBLnxVWUVwGLVdnTU3dmwxHBL_1469aegpstuyDEHMRW", + "UVHIyz01xyIJwxOXvwxHzAwxnxajPQEFqAnouvvwQRJKABENfp78HIIJHRTUWXMNVWRSLM67" + "lvKLkuabLMBC09xH89AKLVHRGH78CD56blFGPQEF7hJKnx45op4eDNJTOPOXQRhrrBpqBLPQ" + "lmgqeoLVOPNXpqDNdnmwBL3dNXOXWX_18jnpqvwyzFHIOPRSV", + "ABSThr7hrsIJOX67uvzAENuD56vwopno67yz0178kl89zAdnlmlvOPCMRSfgGQ45abWXyIEO" + "qAHIMWTUCDktBCaksCxyktST1bvFzAFPCM6gUVVWnxisENMNvF09sCblrBmngq67ijlvENyz" + "vF6g78cm34ABuDLMzAkuefblmnTUfgyI9jISqryzUVghhighyILVBLLV_" + "057befhkmpqrstuvyABEIJMNOPQRSX", + 
"12EFCDFGvFvw2cNXCMghnohiDNcmrsOPop89tDpzzAijCDeoaj9jnoRSQRopWXmwGHmnhr01" + "wGfg7h1bblKLFPOX4ecdOPmwqrVWKU8iGQpqnowGHIBCENGQBL67OXAKvFqApzmwzJghpzuD" + "gqpqvwFPWXefOXopkunode56AKcmDN6gmwOP2cOXVWnxxHmncdrBvwdnCD3dWXOP_" + "01789deghinpstvwzADEFGKMOSUVWX", + "ENLM78BCKLEFCMcmlmsCfgGHcd3dCDqrTUFGghHIpqUVxyTU56qrMNrBvF34GH67lvdednuv" + "wxmnPQ45no0auDVWBCxHbl786gSTCMIS343dfgWXyzABijFGHIvw56455689GHwGhi09uvBC" + "HIisSTmnsCmwcmopfp2cNXjtDNwGopoycdfgNXqAWXistDDNNXcmjtuDmw8i6ggqfg_" + "035689cfhilmnoprvxyCDEGHLMNPTV", + "JKnoIJ3412ghRSrBBCsCQRKLwGdeajCDoyklakfg09ajzAJK89dn8iuDmn01uESTsCpzqrxH" + "HIJT4eOPGQ56mwisfppqPQwGcm3dGQzJ1223lmpzPQ67bcGH12IJlvCMuvHIab5fhifp0ais" + "MWsC097hcdis01ijFGvF89lvhrFGajjttDrBBLLVDNpzqrBLpqophieo4eeopqbcab_" + "023459cefgjlnorsxABCFGJKLORSTX", + "45xyOXqrhrJKvwdeIJcdGQzJAKopklUVWX2clmqAOX34MN56rBVWajnoFG67OPEFUVFPkuPQ" + "uEKUHIstAKijqAOXIJblNXeoghgqOPlmuDAKtDjtGHLMWXtDDNpqNX6gcd1bbcvFgqdeKUQR" + "oy8ihiHIJK3dOXmnKLyI7hoynoHRJKBLoptDhr01zJrBBLmnLMPQjtFP9jjtpzoptDdnDNnx" + "MWhrdn_1245abdeghlmoprsvxzAGHKNOPQRUW", + "xyNXzAEFABdnkulv34uEFGfgajuDdeFP45wxBCyImwuvENCDakzJefrBBCophrwG5fCMcmsC" + "GQisfppqyzxy89mw7h5fEF8iGHFG2cuDPQrsnxMWMNENxHyzzAOP34PQAKuEABisHRJT8i09" + "RSLMcmBCzJ3dqrdnSTCD89pq3dqrCMmnTUUVopnoyzABMWoyeo8i4eeoissCiszAAB8ioyyz" + "_02359adgiklmnostuvwxyzABCEFGHJMNPQTW", + "BL4evwmnuvABvweowGqr78mwwGgqPQOP89bl23rskuuvcdst01QRGQGHPQqrTUMNSTISakfg" + "BCcmvFyIOXAB2cOP7hHIlmzJmnIJFGNXopbclvHIOXGHHRefajcdJK78nofgoyabrsijdeEF" + "67gqRSqrCDLVDNOPuECDKLeoopFGPQajpq4eBLLVuDsCCDWXijis8i56pz5ffp5fijAKsCCM" + "qAAKsCrB_13489bcdfknpqrstuwzABCGHMPQRUV", + "xypzopoyefGHxyPQktijxHxyWXQRCD89zJtDhifgpqgqjtGHdeEFefisBLcdBCABzAoyDNFG" + "ghdeCD6gabqARSGHeocmbcoy7hENMNFPklHIyzqrtDcdBCvFsCKLktrsgqENmw2c4e09hrHR" + "ajJK7hKLJTabST78LMuDzJKL6756GHLMRS89lm8iismnFGpzEFuEnxxHHIkusCCMISHIaksC" + "0aiskuuE_0679aefghikoptxzBCDEFGJLNQRSTX", + "2cuE34ajmwlmuDuvmnQRWXnxoylmMWwGqrLMklpq45yIVWfp89UVxHBLOX34blHRRSlvmnoy" + "nx56KLijeoWXkttDklrB3dhrTUSTefabLVBLUV7hjtlmmncmcdVW09qr01vFkllmnxFPHILV" + "rsIJijdelvrBuDpqhiblzJ1b6gCDqAuDisHIgqABBLLMcmktkumwuvMNuDNXvwktDNvFuDqA" + "ABBLOXABLV_2358bcefijlmnoprtuvxyBEHMOQTUX", + "DNOXVW56ijtDrBuvzAjtrsvwIJPQABHIxyJKFGAKEFBC4501GHpzuDFGyzismwqrabcmEFJK" + "09fpABWX0167OP56ghuv9jvw78mwsCzAwG67BCABUVKUwxuDPQstMNpqDNijmnbcsC12cmrB" + "2cuDAK12mnis8icmsC5fisQR8ikuakMWRSBCqAnouvCM23OXsCWXVW2c0aCMoyISyIoyISMW" + "bcabakktstsCstCM78LVktBLrBhrrBBL_13459achjlmnrsvwxzABEFIJMNOQWX", + "GHrsHIabwGUVktuEMN56IJ45ABzAsCisVWku89nxFGKLqr0134no09xyLMJKpqyzABvwzAuv" + "3dOPEOEF78ijLVvwOXCMsCENdnkl8ilmPQTUFGop67GHSTTUgqnxstHIBCajpqQRRS6gblab" + "wGGHxHvwHRqrGHuDKLktxHuvMNtDaknoJKlv0aENsCisUVKL78zJ8iEFjt9jxynxisyzijWX" + "aksCdnnxOXCMWXOPFPoyhiPQMWCMijeo_15678ainprstuwxzBCEGHIKNOPTUVW", + "67FGUV45wGOXBC01VWCMIJuDPQmwABBCJTyIzAMWMNzJrsEF565f34ISopab894512noENMN" + "ij233dpzeffppq2crBABJT9jhihr7huv5fqrcmST01UVvFFGjttDijbllvRSqA67vwCMdnAK" + "34wxFPqASTmwBCGH09EOopKUJKwGhiuEcdbckuxy23cd12EF23IJyI9jjtijENMNCMEOaktD" + "MNJKxynx01DNtDjtIJkt9jdnkuJTnxIJNX_0245678acfhjpqsyABCDEFGIMQTUWX", + "23EF67RSqrAB78ijkt09hi0134LMghkuopblCD6glvlmBCPQCD1brs09wG23IJQRtD4eblTU" + "GQFGFPnosCfpQRKLAK89CMqAKU4512op5634OPzAyImnnoopgqOXuv67AB78zJktwGoypqmw" + "6gpzvwKL893dpqwxjthrLMmnEFvwdnENDNrBlv45tDMN01gqRSijTUnxvFLM6gklxH09nx9j" + "uvqAFPST67mnjt9jAKqARSQRuDuEEFtDFGGHHRGHSTFG_" + "12456789bfgijpqrtuvwABDEGJMQST", + "CDghCMHImncmvwhruvvFkunxBCVW67lvrsTUWXakvwkl12PQrBhiyzQRlvJKhrxyyzMWDN01" + "FGfgNXktef78OPeoABCDGHIJxy89DNBCqrPQ672cNXjtgqijUVVWtDFGWX0acdajHI12HRrB" + 
"zA5623mn8iklhiLMUVlmBLwx5fIJTUqAABdnghjtqAhixH34nopqLMop6712MNGQgqnxdnEN" + "qABLmwAKqAMNlmxH56LMMW6756KUGHHIIJzJIJ_" + "01789abefgklmnoqrtuwxzACDFIKMNOPTVWX", + "09vwde3d34TUefOPVWEOfgUV01xywxzJlvvwmw89dndeeo2378MWKL6712klnxsCPQFPOPkt" + "dn01stIJijENhrisajabmnlm09EFefCMoppqqAFGnoQRLVgqbcpz56op6gzJOXIS8iAKWXuD" + "pzqAsCajCDjtxHJTajghmnfgMNOPtDKLENCD12IJRSdeLVKLLMPQwGBCQRCDlmJKOPhiijOX" + "jtKLghuDABkuBLuEktkuLMOPISIJzJpzzJfp_" + "02345689bdefhiklmnoqtuvwxyzACDEFGLOPQRSTVW", + "QREF0901MNghRSTUxyzASTEN1bABfgvwklCMrsbcwx89lvqr56uE67hrpqopMN7h45LMPQKL" + "vwKUNXrseohrpzgh4egqMWOPOXoyeowx349jsCBLyIqA6gajgqef5fcmzAhiLM78KLENoyku" + "rBNXJKfpFGpzakISDNWXpqyIxyzJabTUajBLMW6gtD5fij01ab09yzwGrBhroyeo4eUV01QR" + "hiTUEFFG7hblxHmwPQFPhieoop1bcmmw2cwGEFoyuE_" + "1456789bcefghkopqswyzABCEFHLMNQRSTWX", + "fgENEO56xylmMNsChiyzFPrsuvefwxuExyCMajabVWwGuvUVoyhrSTmnsCcmRSlvijghLVrB" + "MWqACMcdfgKUyIhrMWmwrsgqISdeopWXLMPQOPsCOXefMNfgPQisHRQRGQwGwxAKDN2cIJno" + "JK674e5fuDqrbc78ab89xyWXmwcdkuLMoyCDbcgqKLOX8iajdnVWbl2cak6gdexHisrsnxBL" + "xHrB0aeokuajcdlvbloysCCDBLDNSTbcabajcdCDab_" + "245acdefghijklorstvwxyzACEFHNOQRTUVW", + "GHijmnyzisKLFGzAyICDajrsHIuDvwABrBlvyzzAABijBCsCcdAKpzbl12HRvFTUghCDDNEF" + "NXCMku1bdnlmfghiLVlvJKwxSTuDgh895fuvCDgqbluDlvmnnomnakOXhizAGH8ilmfgMW45" + "56deVWWX34DN0a45VWxywxFGJTyzghhrABrB67zAGQhrefBLAKAB09LVkuoyfg0a09eoabgh" + "HIuvuEblvwlv8956abzAoyyzzAOXxyOPOX788i67issCis_" + "23458abcdfhjmnrsuvwyzABCDEFHIKLMNUVX", + "JKGHfgIJxyCMtDyIISHIHRKLbccdFGEFTUghUVwxvwaj09abKUJKJT1bVWpzGHIJoyMNEOxy" + "eobcHI4evFLV12jt23HRCDENOPBCakWXoytDqrOXlvEOxHmwuEBL01KLmnJKHREOpqfpDNbl" + "ktrBhrVWeoLMOP12CDwG78cmklnoyImwtDJTIJrB1bBLwGBCefblktrBCM5fAKWX7hhrak0a" + "qrMWqAgqyIsCCMWXxy6gIJwxAKxyrsstlmJKrsJTSTRSST_" + "189abcefgjpqvwxyzBCDEGHIJKLMOPRSTUVW", + "rsuDuvpqRSQRhrklcmbcbldngh12ktklENjtLVis56OPEOEFvFrsMN01GQoyDNuEcdHIstkt" + "xyVWUVwxdePQ23STMNWX2coyeoxy67FGIJGHopvwBLJKklkuuvabbcAKcdHI09fgKUabOXEN" + "akTUij4epz89OPAKeowx09de0aFGGHvFFGGHsCophilvkl1b5fijakMWzJefHReoOXdeEFcd" + "oydeUVeoRSklyzoyblqAgqbcxyqAAKajMNQRLM1b_" + "02359abcdhjklmnopqrstuvwxzDFHIJKLMNOPQRSVW", + "efku12IJTUzJSThifgLVRSCMrBHIGHBLeoghlmijOXstJTOP5fakKL0asChiLMuv1byIajCM" + "ISoy4eeoJKvwENUVIJbl12FGWXuvEF4eijfpisVWKLdngh1bLM3doywGabHIvFLVOXHRyIWX" + "CDKLbllvuD34xH8iCMblrsoyPQjtNXop45OPqruEnoGHAKQRrsPQHRfgfpiskusCmnopuEwx" + "vFnxoytD34dnjtCMzAyIxHpznxzAISHRwxyIoyijdn3dhidn_" + "02458bcefhjkmnotuwzBFGHIJKMNORSTUVWX", + "EFfgENMNuEFG23kuakvF780aab1bij6734KLPQghhi56GHajwx8iOXuvktqrAKWXvwlvzAwG" + "pzopGQBLpqKLvwxHcdyzTUwxnoMWbcstxyxHblsC4578cm23isKUAKCMIJgqqAwxMW1blvbl" + "dnijmwabgh1bhi89gqOXVWsCKU12ijisop8i6g01isghyz9j09jtGHHIuv01yI9jtDHIDNno" + "uDjtajoysCWXOXGHpqOPFPFGeoOPOXpzuv4ezJwGmwpzwGcm2cmn_" + "0124567abdghjklpruxzACEFGHJKLNOPQTVW", + "sCkldeaj34uvTUefwxktuDwGxH23mnpzopMNlmnoGQzJmw9jisJKdnlvbl12kljtDNbccdIJ" + "ghpq4589ktklfgrBLMvFdest01MNnxEFUVdnVWwG3dwxzASTBLzJrsqrvFWXnxABBCUVTUoy" + "VWrBNXtDRSktUVwGzAhrlvghrsyInoABSTrBQRABhrak34TUst7h23ab1bPQISyIFP121bIS" + "MWSTbltDlvopfppzVWQRfgUVoyghrsvFFPfpvFsCCMMW5flvsCbl_" + "24589abdefgklmnopruvwxzCDEGIJMNQTUWX", + "xyPQ56WXyzklOXwxxHktqrlmdersFPRSMNMWWXxyQRvF78HRsCzA12AByzwxhilvtD01OP5f" + "89zAPQ67noHIFPvFGQuvSTkuOXkteoklLMvwKUJKqAjtmngqAKJTPQfplmOPJKhrIJwG78qr" + "klqAAKDNpz9jBLMNcdrBBLqrrsHIDNopdntDjtmwMWuEnxuvcmEOuELMno9jqrmwwGGHuDtD" + "IJ6guv8ivwxH09wxghqAQR01opRSQRKLJKvwKLfgkuzJpzefzJfp_" + "2567cdijlmoqrstuvwxzABDHJKLMNOPQRSWX", + "ABMWzA01hi5fENBCstPQcdopCM09ktMN78LMKLAB45WXvw12FGEFdefpsCdnisstBCcdLV67" + 
"fgBLIJ56ef34wxgqzJdeqAAK1b23QRklCDCMKLwGFGsCrB01blktGHLVVWlvnoIJpz89JKhr" + "bcnxRSvFFGEFuvUV124euE2c34ISpqlvTUWXFGGQ78QReost7h9jRSOX89PQoy09IJOPEOjt" + "VWPQuDWXqrVWJKIJrspqOXLVnoBLLVeoyIrBhrEOuEtDkuakkuBCuEEO_" + "02345679cghikmnpqstuvzABCEFHIKLMNOPQRSUVWX", + "34uvJKvFFGHIopfpefrsKL56abyzuDIJGHBLJThruvrBstlvpqqA7hHIgh89OXNXDNuDuvvF" + "FP01oycd12IJdejttDFGrsyICDuDfg67LMghuvbcAKcmmwKUWXzAjt45AB0aISyIst2301PQ" + "VWwGhiMWktku3d4estghCMeonoktst12CDcmyz09deiscd2cLMCMsC5601KLktisef8iLMlm" + "akmnQRmwcdkuwG1bnofgdeOPGQmndn89deuEEOFPRSISyIoynxeo4eeooyyIISwGRS_" + "124679adefhijloprsuvwyzBDFGHJKLNOPTX", + "VWTUvFHIabSTnoHRakBC6723wxhiyzpz34UVLVrBuDdnvwJKFPzJ457hrsQRvFTUFPEOajqr" + "0156BLAKlvCDbcpquvRSopwxoySTvwmweouDoyMNxyjtTU781buEcmwx0ahrrBajMWAByz4e" + "QRgh23BCJKxyKUyzmwEFdeCD6gABtDblzJijhilvijVWqrfp12rsAKnxqrjtdn3dyz23mnbl" + "degqqA1blmkuktAKabkuUVuE1b9j12VWUVajkljt23JKKUJKtDDNNXkukllmijhighhiij_" + "01245678abdghjmnoprstuxyzBDEFGHIJKNOPQSTUW", + "uvQRTUmnUVrBgqPQJKghSTIJKLvwpqklRS6grsopqrwxjtuDpqoy1201lmKULVbcJKxydn45" + "mnxHuvCD6778zJMWGHnoBLLMCMdnFGuD09SThi01op9jCDijyzHRajkuuElmJKxylvWXisuD" + "vwJTzJ12cd4eJTuvrB2cnxVWdnkt09CMLVVWMWCMBC3dnxuDuvklqAxHQRdnABnxgqvwqACD" + "56BLHIIJLMABgqdnwx45RSMNqAoy6geogqlm7h4eIS45fgLMoyAB01yIeonowGnxISGQHIPQ" + "xHOPOXghno_2356789bdgjklmopqrtuvwxzBHJKLPQTUVWX", + "OPLMrsUVghoyuDuvstktEF34JKPQIJCD67FGqr5645rsakRSjtVWtDvFKLwxxyxHHIyzefzA" + "zJJKABfgpzST78efMNCMCDuDuE67wGEFEOOPde09LV5fbl6gHRRSISIJzJzAqAgqfgefeono" + "nxFGlv01blhiBLcmmwwGGHHIijku5fuvLVTUbc2c12hiOXUVHRabcmgqmwjt3ddnuEEFFGwG" + "vwEOENDNtDjtajakkuMW893d09VWNXJTIJyI0ayznxku89akDN8iwxxHkuvFUVisuEuvABBC" + "CDuDuvvwwxxyyzxHsCisop0afpqAfppqgqgh7h6756vF8iop", + "KLQRuEyzab89qrijhrhiwxmwbcBCxyABpqgh23sC7h098iajPQakklcm34STmn56TUWX7845" + "12gq89ghRSlmnozA01wGJKfgyzFGisIJktopmwGHrBklvFVWOX233dHIBLGHuDmn12CMlmwG" + "2crBijblIJ6gmwGQlvsCwGblmwabhi8imnJT0anoCM56CDeoopLVBLhrFPpz7hvFzJGQuD01" + "WXfpLVpzOP4eakFP0azJuvkuqAOXfpisJTdn09STnx5fRSuEqrxHrsrBVWnxhrdnLVBLrBzA" + "BLfpLVTUKUWXTU_012345789abcghijkoprtvwxzBCDEFGKLMNOPQRSUX", + "blktOPlvLMlmOXvFCDBCEFGHENmnlvABJKxycmMNHIKLFGrBPQhrEFakISdednGHBLyIEOFG" + "wxqAEFENeorBde4e34ijISabvFvwwG45opajblefbc7hHR78RSKUaboyEF09KL6756ghNXmw" + "DNGHNXnxhiijfgEOQRFGtDbccd45LM01CDvw2cRSpzuvghfpFPJKBC12LV2czA6712STgq01" + "78pzBLhiABCMIJ8iRSisuDCD8iCMHIqABCWXcmCMOXOPPQHRGQvwOXtDHIcdwGjtIJtDzJgq" + "WXGQMWuDCMQRBC_134789abcdehjklmnoprvyABCDEFGHIJLMNOPQSUWX", + "qr5fSTuDHIvwEN89lmIJCDOXTUnozARSNXeors01lvvF23CMuDsCCMopQRpqktPQuvqr78qA" + "KLDNFPKUrBBL4eLVyzdeGHHIzA121bxyHRklAKrBblnx8iKLlvJKbcabijyzOPhighkthrrs" + "VW2ccdajFGcm7h6gIJHIdevFuDUVCDopKLBCmwfgefGQPQFPwGlvVWqryIJKGQmwBLblxHCD" + "fpWXrs1buDijabghmnGHrBBLxHisajoyijKLhieooynxdnKUnxxH3dAKqAyIISgqqAAKSTRS" + "HRGHwGmwRScmmwwG_034578cefhjmnqrstuvwzACEFGHIJKMNOPQRSTUVWX", + "CDstFGfgKLqAABBCsCrs89mnIJabHIefwxxyak23bcSTopuDCM12degq0aWX2c7h676guvVW" + "MWLMHRlmmncmcdUVnxEFeoQRpzzAqAMNxHvwoy3ddeopfpeftDuDkufgNXcduEEOhrpzzJst" + "ij1bklbl1bktakaj9jtDgqlvklku7hvFJKfpOPOXbcabIJjt5fbccddnDNvwpzwxvwDNuDuv" + "vwwxnxnooyyzzJJKAKABBLLMrBKUAKqA45NXKUktBL3dfp5fCDissCABfpispzJTzJzALVAB" + "rBhrBCBLBCJTCDDNstrBKLJKIJRSISIJJKKLBLrBrssttDDNNXOXOPPQhr7h", + "RS67abPQefST09OXCDpqHIOPkldeIJqrQRmnPQcddnJKxynxnooppqqAzAEFENMNMWWXOXOP" + "2cbc1b78aj3duDVWlmktrsstjtijhiLM45WXcdOXuv56qrFGwGvwEFfgDNuDuEsCmn01CMCD" + "rsku67tDNXoppqqAzAyzefxHlvAKgqjtHRDNfpJT899jijnoGQis09cmzJmwMWpz5fktqr01" + "GHFGvFvwwxoppqqAzAEFkuyIuvEOeoeffpvwuEku4eAKEOoyNXblLM6gklgqpqnx6gxHBLkt" + "pqqrrBBLKLJKzJKUeoyIstjtijnxISGQ8isCoyis8iyI7889bl78sCoyeo4e", + 
"67sCuEWXAKwxuvopCMFGno12mnOXxyFPKLyIijISlmmwvwcmzACD012cyzvFUVtD34STqA3d" + "ajEFab45aknxwxmwcmcdvw4eMNAKKUrs34IJjt12mwEN8izA563dpzktMNMWWX5fFGHIFPzA" + "LMMWis9jtDsCdnqruvjtaj45cmABkluD78ij2c09zABLlmopQRCMsClv67zJIJfp8ieopzcm" + "is8i9jfgnxoy7h09mwJKsC2cWXhrCD12ghyIrsijuDPQAKrBOXBLwG7hGQhigqhrISwGOPuE" + "qAEOAKuEoyijWXmweo_0123456789bcdiklmnopqsuwxyACEGHIKLMNOPRSUX", + "bcpzstgh56OXklrsoyjtpqeoqruvyIRSkt348iFGoymnlmIJstQRuDop23kl45OPpqkthino" + "PQWXhr344edezApzrB09JKOXKL89MN12dnisDNOPAK78sChibl09MWklopVWCMUVnxRSBLGH" + "dnmnxHfgWXlmkuakVWzJ56LVblabWXaj7hijbcisnx3d67uDcdhr8iBLxHTUhihr45rBBCcm" + "ghCDijuDSTab1bajOXijOPhikubl7hHRhrxHFPlv7hvFOPmwKLqrghrsfgLMlvefMNcmblLM" + "JTqrKLJKAKqAgq6ggqAK_0234689acdefghijkmoprstuxyzBCDEFILMNOPQSUW", + "uvvwrsuD3423EFwx09uvENfgefbcMNcmkufpstQRRSktrBpzLM459jzJTUPQOXyzpqvwrskl" + "UVeomwAKhrOPst7h01jtlvzA12vFISuEgqLVhiWXDNSTABqrdnOX568909rBtDktxyyzxyrs" + "DNwx3445klxHHRnxyIakMWisefnoTUghajWXmnLMJTlmxHMNcmENoyRSyIzJ9jpzMN01kuzJ" + "HRLMjtfpmwJTfg6g562cgq6gfpAKdnabST0a9j893dqAOXOPAKKUcmAKtDgqwGDNghNXwxqA" + "DNtDuvvwwGuvwxxHnxdnnx34_012348abdefgijnprstuvwxyzBCDFGJKLMNOQRSTUV", + "uvlvklENIJOXpqVWabIS09KLhiTUOP12wGGQPQFPvFUV3dJKwxxyoystrsuDMNopyIijoy7h" + "wxnxmneoqrisnoVWLVLMHRdnWX8icd01uvsCqAbcVWBLrspqabCMghisuEHIIJKLlmSTmwxy" + "ENefdeefzJAKoppzzJhifgyzzAyzEO09RSgqrBABqAyIoyakefktnxdndeeooy0aSTkuajak" + "ij6guEsCxHabkuOX4enxTUefdncdcm2clmhiHRbcUVabGHxHxyyzfpVWpzuEyzfpEOxyuELM" + "wx5fMWLMhrfpwGvwlvvw_2379abcfghijklmnpqrstuvwxyzACDEFGIJKLNOPQRSTUVWX", + "uvuEvw45dezAefqAmnuDlmklhicmQRgh34uv78KLmw2cLMdnIJRS09gq890aajwxwGPQuDCD" + "OP6gijHRvFCMhi7hbcabpquDxyUVdekucdGQlvwxajSTJK78bcMWCDde67KLNX1bmnhroyyI" + "xyxH9jlmabmnjtnxuDISklGHvwMNeoijBLajoywxyIISoprBAKzJblxHlvklkuBLHIDNMNij" + "vFLVFPdeJKcdpqis78IJvFHIdnpztDBL23jtrBbcop12hrtDnxsCnouDrBTUKUKLzA23BLrB" + "oppzTU9jfp5ffp1b7hbllvblhrrB_015678abdeghijklmnoquvwxyzCDGIJKLMNQRSTUVW", + "yzGHBCkt09LMMNENhi45UV7889lm56cd67ijtDuvde9jST23EFcmjt34klNXWXMWHI4eHRDN" + "lvsCmw6gAB45cmOXNXENkt23opvFVWbleo5f12fgCMKL78lvghvFhizJMWWXisRSFGBCGQst" + "1bwG34abtDuDCDgqrs23bckuTUQRmwjtPQJT9j3445uErBGQUVghdeRSakhrIJrBjtBCCM12" + "cdLM67LVBLuDkuEFsCrBGHuDDNENhrpzfpfg6g677hhrrBBLKLJKbcde8iis8iHIxHzAcmmw" + "sCIJzJnxIJzAISgqqAgqabGH_" + "0123456789adefgijklmoptuvyzABCDEFGHIJKLMNOPQRSTUVWX", + "01jthiopijxynomnzAghtDMNajhr34LMhiPQklakabijfgCMlvAKqAbcvwNXxHUVrB7856gh" + "45isefuDDNpzHR5fcdJThioyKU09uDNXzJJKAKyIwxMNbc89uvKLku3dVWghsCvwktFG56st" + "klblUV09GHcdENdeajFPOP0aisIJab1bTUUVajfglvsCjtHIktnofpMNoyeo8iklyImwoyST" + "cmJTstCM4eeonodnrs4eisxymwnxLVblsCVWdnFGxHEFBCnxlv1bBLJKFGzJstFP3dpzvFFP" + "12LVlvzJENDNtD1bbl1bDNENBC_" + "012346789abdefghijklmnopqrtuvwxyzACFHIKLMNOPQRSTVWX", + "45HIktfgGH348923bcFGcd56EFJK097hzA4534121bbchigqgh7h6756454eeffpab23MNhr" + "opfgLMcmUVwGFGvFyzIJHIOP899jijyIdnnoQRhrrBABzApzopeoeffgBCFPKLJKAK7h5fRS" + "zJmwqAcdcmmnCDtDstDNGQQRHRpzOXuvvwvFFGTUwxCMuDLMlvvwmwiskunxzJNXMNMWsC01" + "VW8ihr7huv12WXDNmwKLbcoyEO3duECMGQSTwGdnmwJTissCisCMGQlmOXWXMWMNENzJHILM" + "ajabakkllmcmrBLV3dyIyzoyop8iblvFktkunxlvblBLIS010aabrBfpajpzLVfp5fKUHIxH" + "EOvFuEEO", + "uvuDcdFGbcdncdcmJK01delmIJmnmwwxHIzACDvwOXefnouvcdMNuDGHLMABEFvFWXBLyz89" + "kllvuvENmw5645nxCMCDDNzAxyyzzJIJHIopmwwxnxcmvwpzqAlm78HRAKnouErs675ffg6g" + "ijxHgqOPkuJKmnakghhi8i7867nxtDfpsCeoIJpzjttDVWzJisqAEOuEku8iENTUlvpzbloy" + "QRFPfp09vFwGqrlvlmcmcddnnxxHHIISRSQRGQwGfgSTAKEFyIoy9jsCeo4eMWqACMLVeovF" + "1bISFPoysCBLyIoyhrissCeoAKrBhrrBBLLVVWMWCMsCisjtIS4eef3dtDNXdeefDNtD3d8i" + "fg23TUNXjt", + 
"12qrajKLstkloyoppqqAzAxyfpabBCbcwxUVVWhiST1bLMpzijyIIJgq23xyABrBqrTUENde" + "JK56uv67gqvwoy0112dnQR2ceo45OPuD56UVGHOXblCDBL3dak5fpquvuEmnGQrs676ggh78" + "HIqrdnIJEO4e23MWnxmnmwvwxHpquDfpCMuvzJlvLMnxblfgdeRS09dnsCMNnxpzrshrhiQR" + "zJSTwx1b12wGblefvFRSvwpqxyCMMNMWWX01wxxy67VWvwgq4evFFP6gpqgqqAKLPQFPTUlv" + "AKvFJTlvPQKUqAgq6gzJpzblzJ67ab09_" + "0123456789abcdefghijkmopqrsuwxyzABCDEFGIJKLMPRSTUVW", + "nopqzAhiop78dednnxxyoyABuvTUBLst89kt01abajvw67klefghbc34ijOPuDDNCDCMuvgq" + "MWuDisvwuvkuakabbllmwGxH12PQQR5fvFRS1byIoy238iblpquE3dstispzIJfpdeLVyIwx" + "uDuvvwwxxyyzzAABBCcdefhi6gST0aISyzyIfpnozAABBCsCstjtajabbccmmnnooyCMGH0a" + "yzFGdeBLijMWtDwxLVlvFPbllvvFuDGQjtWXtDrBuDuvuDCDisvFvwwxxyyzpzfpfgghhrrs" + "sCCDuDuEUVVWWXUVNXfpDNisstqApzAKHIIJJTTUKUAKqAqrrssttDDNNXOXOPFPFGzJ7hpz" + "hr9jfp5fjt9jrB", + "5ffg6gIJ23GHENrsabqryzrBxyzJ45PQ12pzyzoyJTbccddnfpOPvwpzisBLQRLVijABrs34" + "lvuEsCCMaj23qAnowx45gqxHHIyIAKNXISEN34qAMW01ghhiJKNXyIlmEFnxFGakENijgqfp" + "CDgh7h675fmwBCfgMNENuEuDCDBCBLIJfpajwxoyEF09hixHGHHIKLak12AKHRiskusCqAgq" + "LMCMklsCqr3dnxlm9jyIdnnxisMWcmjtzAoyeouDENGQ9jtDyzopabktzApqqrMNxHENijlm" + "wG9jPQkllmxyEFajmwijxHvFabcm2cwGxyrBcm_" + "12356789abcdfghijklmnopqrsuvxyzACDEFGHIJKMNOPQRTVWX", + "WXEN01mnTU4567EFvwlmnohifgklBCbl56UVqrcdIJOXCDFGrsRSsCyzwxdnijSTbcCMgh4e" + "ab89hiHIyIxyOPnxzAktakajijisKUyzoyeoVW3dUVqApqpzzJJKdn09KU3drssttDuDuvvF" + "FPPQQRRSISIJJKAKABde2cGHsCMWCMqrBL5fyIVWrsjtmw9jsCCDuDEOzJuElvbcbllmcdmw" + "abCD0ajtvFDNNXWXVWLVBLBC1btDfpblpzdeoylv34fpEO5feohr7hfgoyFGwGvwDNGQwxyz" + "zJjtHR9j23EF89677h12qrhr7h788909011223344eeffguDCD67BCqAzAxHxyyzzAABBCCD" + "uDuEEFFGakqAkuakwx", + "01ktLMCDOPijcdde4e3423121bEFEOfgghpqxHxyyI4567eokuGHbllvFG4e78OP89uEEFvF" + "ST5fvwiskuISfguDuvuDCDsCBCrBwxyzabCMnxis098ixHnxTU3445KLFPyI7hghlmklABjt" + "isSTMNefhrLMbc9jdnabderBzAJKoyIJktjtajkllmgqmwhrbc7hmnyIcdBLISMNrsNXwGmw" + "mnno2cyIyzzAqAwGUVqrENAKcdKUAKrsGQLVoppqdnGHHIstnxIJHIxHzJHRnxdnGHdeeoop" + "TUABpzde3dBLdersLVNXeffpUVpqTUqrqAzJrsgq6g_" + "012345689abcdefghijkmnpqrtuvwxyBCDEGHIJLMNOPQSTX", + "45CDBCST34rsQRRSrB09ABPQWX01EF3d5ffpISpz1289STvwQRefbl9juvghOPklwxvwvFFG" + "GH78238iHRfpisGQPQFPnx890akuuEuDCDBCBLLMMNdnAKqAcm8iTUDNCDCM3dsC09yzlmhi" + "EO89lvAK7hvFgqrBzAhrKLRSrBpquvBLwGABLV67VWSTLMblvwKUKLLVzAcdyzde56xy45nx" + "5f78mn01mwENisvwQREFxHakyzENhiuvVWHRzAkufgak67tD12ghCDktNXBCABBLfgakktef" + "hrrBzAhryzBLyIHIst7hhi8i5609898iis8i890901_" + "012345678bdefgiklmnprsuvwxyzABCDEFGHIKLMNOPQRTWX", + "klbcHIjtTUkuzAEFFPOPcddeRSQRSTvwxytDlmyzVWpzfpopeofgyImnISGHWX5fvFUVNXVW" + "lvoyyIjtqAGQghbloy89klajwxENlmRS4509uEKUvF01vwrsiskugqUVijSTISIJ0auDwxeo" + "zJJTajklis8i56CD3dwGpq6gblyI237hTU89qroyuDyIAKnx12norB342cop45ijcddnmn12" + "mwUVBCpqno01gqbc4e78cm09BL0112ghmwzJwGGQhi23CM3dPQkupzhrzJghmnmwfp5ffggh" + "rBdeGQefwGmwabABijajAKmnGQabnohrABfpopno_" + "013456789abcdefghjklmnoprstuvxyzABDEFHIJLNOPQRSTUWX", + "WXTUISKL34wxcdJKABlmfgBC67opvw3dispqmnajLM09KLzJJTrsyIuvMWLMLVsCisRSjtCD" + "uD78GHBLrB45LVBLDNIJtDktMN9j01JKFGkllmGHNXcmvwHIAKabakklfpIJlvEFENuEISoy" + "JKyIBCvF0912kuIS2cKUFPcdxHqAqrvFrBBCwxQRakeoHRhrEF0auE7hnoFGopakxH6gvw34" + "GQFGnxUVijSTAK3dgqwxHRnxCDnocmpq67hitDoplvijBCABTUblKUlmklAKvwlmmnAB9jno" + "mwkt78uvBCmn89vw78uE67CDdn6gghmwvF3dEOuEcm34_" + "01345679abcdefgijklmnoqrsuxzABCDGHJKLMNPQRSTUVWX", + "EFxy56uErsvFdeLM67STstwxUVefklKLkttDQRDNlm09lvMWLMLVOPHRRSISzAijajTUabAB" + "nx89bllvxH6ggqGQIJ1bJKUVqAqrrBBLKL0a4eNXpzMNakhiKUnozJcmdnfpvwijHIFPktHR" + "EFFGisdepqJTsCnxstEFxHuDENGHpzrsuvyzdnblklgh78JKAKzAIJ9jCMBLkunxyIlmoybl" + 
"uD3dabHIDNwxrBstjtaklveobc2cTUbcblkttDlv9jDNABNXjtPQvw6grBOXhrWXVWGQ8iOP" + "rBstUVdnVWoy45343ddnPQ3d34wx45KUAKBLqAgqqAKUnx_" + "014678abdefijklmnrstuwxyzABCDEFGHIKLMNOPQRSTUVWX", + "12klefyzOXpqzAmnWXIJnorsMNopEFlmvFABfpqr09gqkt45deBCENRSQRyIxyxH56yzpz6g" + "JKstzJ2334BL01ISklKLLMmnJKKUTU12isrsIJeoFGisij9jakEFyIoynonxxH5ftDkt4eIS" + "fptDDNopjtpztDsChiisstjtajabbccmmnnooppqgqHR5f0anxzAyzxHxyEOakyzlmzAqAqr" + "AK2cqAdnJKBLqrgqghrspzmnrBBLKLJKzJpzpqlm7hOX09lvhruvzArBLVCMLMBLrBrsyzvw" + "7hwxuEuvvFxyFGgqopFPLVGHyzMNvFbl78673ddnuD3dmnlmmnnooppzzAqAgq6g67788909" + "011blvFPOPEOENDNuDuvMNMW", + "yzgqghhrpqbcyIKLcdopnoOPisxy89PQQRRSEFdeST2378abwxbcgq09EOrBrsJKstmncdmw" + "NXLVwG5fGQBLISakrB7hghbccmlmmw4edelvhiMN562ccd3dvFzAHI0aab1bLMpqopyzIJuE" + "nxJTKLzJoy89bckuefdnblajnxLMTUabuDwGJTeoGHDNHIfp5fkuIJFPjt7hJK1bajEFMNMW" + "lvblqrQRefHIak78FG1bmnlvdetD56rsDNkuGQnoQRpzefqrnomnmwwGGHHIIJzJpzcdcm09" + "0auEdeAKmwEF7huDjtef899jjtFGpqfp5fvFeftDhrKU4eAK7h_" + "012345689abcdeghiklmnopqrtvwxyzDEFGIKLMNOPQRSTUX", + "8iyzzAOXSTLMBCPQAB09xyKL67vwJKopoyyzzAqAMNOPENNXOXuEEFvFTUqrrBQRPQGQwGis" + "stktkuuDCDJTmnmwwxvwOPgh45zJpzHIxHwxBLnxfpdnCM2cvFRSbccmyI3dhiaj0amnij56" + "ef4e45oydeeonocd3d23fgghbcisAKKUrBhrrBBCklblabABzApzAKqAfpop126gnxBCsC9j" + "tDktkuDNNXHIxHnxnooy34jtIJ23WXtDDNgqhi34NX2cOXENqAmnBCABJKzJzAMNBCmwCMsC" + "KU564eeooyPQQRHRHIyIoyeo4e45566gghhiissCCMMNNXOXOPOXVW8iWXOXUVDNtDEOOPFP" + "vFlvlmcm2c1201099jjttDDNISmw", + "ENAK23KUJKJTqrqAABMNklFGef89lmfgEFLMblnoBC2c121b7hyzCDopoyBLyIuDmnuvLVrB" + "BCwx09xyuE0aabwGwxKLFPPQGQzJyzmwktcmkl892ccd3dDNtDzAuDxynxvF78bldnlvAB34" + "vFbc3dCMvwrBeoyzCD7h6gtDghFPhr7hrBakajjt9juDijpz45zAuvfp5fabvwoyxydnblBC" + "STajOXCMstMNvFlvWXVWLMAKENmwijGHvFOXmnnoFPsCCMopdnnowGmwvwmnnouvNXOPsCis" + "pzuEuvENISvwwxPQMWxyyIvwQRabPQOPuv8iuEajEOabuvzAISvw_" + "01234578bcdefgijklmnopqruvwxyzABDEFGIJKLNOPQTUVW", + "uvJK12bcCDuDENTUajIJcddn2cMNABijDNSTLMRSLVlvUVaknxyIblku8iQRWXnoTUMNop56" + "lvkl67EFSTNXgh1buEuDblrsOPFGoyyzPQhrEOfguvuEEFajMWxH1b9jHRBCHI12VWzAcm09" + "ABKLQRjt23lvLMBLKLRSrBDNGHiscdQRMNKUFPvFab89uDmwoyqABCwG01ABeoAKbchrGQTU" + "ab7hCDoyuDyzxyKUBCqAST78tDDNJKFPNXghtDrBKLCDjtuEuDCDlmLMyzKLJKsCUVklVWis" + "hi7h78lm5fIJhrpzajrBpqqr9jpqghcmmwpzajzJwGfppzzJpzfp_" + "0123456789bcdfgijnoqrtuvwxzBCDGHIJKLMNOPQRSTUVWX", + "WXvwUVBC09ijabcdajEFHIbcABlmmnoppqqrrsLMMNklVWFGstij89deghhief2c4eNXfgGH" + "xyyzuvtDzAlmABqAqrrssCAKisJKMWblIJJTSTRS6gBLmwcmDNgqghhrrBABnoktCDsCstvw" + "vFFGnxwxmwDNNXOXcddeKU7hcmuDfpefdednnopzGQ9j5fjtef2cktxy3ddeMNef1bpqgqfg" + "pzyzuDuEENMNCM9jklJKzJzAghhr5frssCCMBCBLKLKUUVVWTUJT8iGHwGwxxyyzzJJTSTRS" + "lvWXisblvFlvUVxH1bvFvwrBFPBLvFuvqALVrBVWgqku8i0aqAakFP0almzATUklkuuEUVyz" + "KUxyOXEOuEuvvwwxxyyzzAAKKUUVVWlm", + "FGlmHIJKef12zAxyfgIJENKLwxyzRSEFGHghklgqstcdFG56VWUV09hi34abHI6gLMqAJTde" + "xyBCuDCDCMMNENEFvFgqIJsCvwAKisbcGHwx78WXzAABuv23PQmwcdrByzwGzAkuxyOPcmgh" + "HR8iAKnxjtzJdnopBLuEOXGQVWeoajGHFPFGGQpzMNrBzJGQEOJKJTTUKLLMvwwx45EF2cMN" + "qAvwHInx0aBCmwwGJKBLuDUVmw344eNXyIoyeofpfgefblGQISbccdMWSTdeGHuvblef5fdn" + "3duDopdnlvvFmnlmlvfpfgghhrrBBCCMMNENEFvFlvlmmnnoFGDNsCCM7hsChrrBBLrssttD" + "DNENEFFGGHHRRSSTJTJKKUUVLVBLMWAKqA", + "QRzA01ghpzopklRSUV89pqhieo12qrWXABoyTUSTrsOXijajoppqqrrBBCCDDNENEFFPPQQR" + "RSISyIVWWXhr7856dnuvnxNXdnmnIJ67EN458i787hLMBLBCnoissCtDDNMNCM9j898i2334" + "3djtcdtDop0akuBCuEENak56459jkujtmntDMWuEvwlmlvvFxHwxwGdeLVBLfpHR5fDNtDjt" + "ajakkllvvwwGGHHIIJJKKLLMFPnoblnxIJnoktopMWfpstpqmwwGGQnocmxHrsstktkllmmw" + "wxnxnooppqxHlvabvFHRhrHIhifgefdeghhiijajabbccddeefabhi6gghhiisijajabbllv" + "vwwGGHHIIJJKAKABBCsCcm1bKUFPBLrBBL", + 
"wxBC45EFnoklSTktoppqghCDhiblefuvmnOPstPQFGkuQRdeENLM09qA3dJKabghfg6g56KL" + "OX01bcMNOPabefEFgqLMIJJKwGFGvFzJVWUVRSnoKLoyoppzmwuvENajMN67WXwGDNyIABAK" + "JKzJqrqAABabPQ23OXFPNXCMLMBLfpoyJTrssCTUEF89cmpqABxyvFqr7hLV3445CMmwijtD" + "yzqApqpzcmuEbcwxcmeolvAKbllvvFFGwGmwcm232ccmGHSTzJwGuDmwcmbc2c12JTab9j09" + "0ayItDjt9jijisrshr7h787hHRQRGQsC8iyIxyxHissCISyItDyIISSTTUKUAKABrBhr7h67" + "56454eeouEdnBLnxrBzJdnBLCMMWHRLVxH", + "qryzsttDcdxyTUIJhrrs34vwMNwx45HIEFLMuEFGENABvFEFuESTkulmblRSjtstktklKLlv" + "wGFGvFJKmnmwwGGHxHyInoWXNXMNLMKLKUUVCDuDcmopCDmwpqfp2cmnCMgqOXqAAKpqjtuv" + "wGmwis8i899jajabbllmmnnooppqqrqAijwGrBeode78zJ12ef6gcdsCdehrMWoyfgTU89LM" + "gqKLLVUVSTpzCMefmnTU5ffppzzJktstISsCkt3dSTeomndn3d23121bblHRnxMWxH5fHRRS" + "WXMWoyCMsCqAgq6g677hhiissCCMMWWXOXOPPQQRRSSTJTJKyIaj0a09898iabKUEO4euEnx" + "ak0adeakkuuEoyAKbccddeKUABeooyEObcOP", + "rs45sttDvwWXAB56lvFGwxcdvFBCaj23GHwGmwmnnooyxy12abHI67IJyzdeefbcMNEFFGkt" + "wGJTzAcdENuDdnEFdeeooppqqrhr7h788909011bbc34FGUVyI45bc4ezJsCISnxdnRSnxgh" + "gqqAABrBIJJKKLLMNXhiCMsC67stBLrBOPqrfgnoVWktkllmmnnooppqqrrsISjtoyIJJKtD" + "jtKLKUUVDNxH9jBLyI5fijAKisajFPTUfpgqqA5fsClvakMWQRISRSHRIJSTtDGH09NXCMCD" + "sCisijjtuDuvEOuEkuak56ajpzhiJTTUKUAKqAgqghhiijajabbllvvFFGGHHIMWzJ452c23" + "34455667788909011bcmnx3d2cdnnxpzEO3dcm", + "CDlmfgBC1223mnENuDghno01cdMN09TUhiuv34vFOPyIIJJKKLLMMWWXOXEOENDNuDkukllv" + "vwwxuEoy89eoopstdn45pqktuvnxHIRS67qrIJxHSTKUvwrs2ccmgq2cQRCDHRqAAKJKuD12" + "jtkuBL6gKLxHnxxHHIyIoyoppqgqghhrrBBCsCstjtajakkuuvvwmwwGPQRSabCMij7hQRGQ" + "hisCRSdnwxJTSTIS5fyzxyHInolmijajabbllmmnnooyyzzJJKKUUVVWMWCMsCeoij4eeoop" + "fp5ffgqAefwx0avwOPNXKUlv3dakMNcdkuAKEOuEkuak0aKUabLVbccddeeffgghhiijajbl" + "KLqAlvuv9jtDjttDIJJKKLLMMNDNuDuvvwwGGHEO", + "zJpzfgfppqab78qryzajQRzAbc89ghcdcm349jop6gLMmw4eLV56hiRSrBpqnoUVwGopTU45" + "FPEFPQpqqrab67gqajqAIJJKmnnoeodecdENopgqMNlmeo23mnbllvAKijBL6gCMstgqBCLV" + "1brsabyzvFDNjtoyKUpqNXyIwxeoqrOXnonxsCBCrBvwuDuv3dmwtDopHRyzdnstMNDN2cis" + "bloynxghLMNXblbccmmwvwfgFPISakzJKLIJdnMN3d2cISblEFefxHkuHRJKuE4eakyIvFef" + "wxfgISDNBLyIuDFPvwwxCDsC8iajhiissCCDoyuDuvlvblab8ibccd3dcdbc_" + "0123456789abcdefgijklmnpqrstvwyADEFGIJKLMNPQRSTUVWX", + "IJrsvwABefzAuvMNLVstHIpzuDbcOXqrdezJGHCDpqqAzAFGGQIJ78cdvFtDST4e1bbc2cQR" + "op898igqlv9jcmFPhrmwvFkude89BLghrB01hrrsisBLEOnxeoqrjtpqsCrs7h233dabfphr" + "JK0901IJ56ktkuTUakajuExyqrVWbcvw6gLVwxoyvwuvqAuDvwijUVcm12mnrBnxENlmBL01" + "wxvwHIOPuvrBPQdnLMKLQRabbcuEkuAKeodegqcdENqAbcklabRSMNAKktlmVWUVMWstKUAK" + "gh0aQRtDPQNXDNCDBCNXhiak01kuOPghuEgqEOuEABpqBCefCDtD5fkuak6gefst_" + "01235789bcdefghilmoprstuvwxyzABCDEFGHIJMNOPRSVWX", + "uvqr7889klIJHIvwLMOX0112ABQRBClmGHmnktFGEFEOOPPQklRSSTwGmwyI9jHIBLKLJKIJ" + "yIyzzAcmijissttDku34hiuEWXrBMWBLvF56vwOXzJzAAKWXHRrBajqArsJTQRAKakvFwxFP" + "67TU45lvOPisqrfpbcpzvFlv78568ifgef4ePQxyOPyzghmwnofgeoeffpGQijOXhiwxpz5f" + "fpoppzyzhr0akunocdbcbllmmwwxnxIS67wGxHjtyIDNtDlmakkt09kllmnooy78DNISSTno" + "strsmnlm1bakrBLVBLLVrBhihrrsstjt8iissCMWCMMWsC6gis787hhigqdn89qAAKbllmmw" + "wGGQQRRSSTTUKUAKqAgq6g677889090aHR2ccm1b2c", + "wxajijhighJKjtLMgqtDstsCqAcdBCKLbcmnno9jcmakmwijkukllvLVdewGCMuE34hi2chr" + "blcm45kt56mn2cAKxyoynoOPWXrsyzghoywxvwkuuvjtgqTUFGzAyzefxyqrrsakHINXGHis" + "uDijnxdnVWnxzASThipqFGrB67tDvFTUGQ3dRSyzwxzADNdeOX7hhrrB4esCqris8iisrsqr" + "wGqA45jtuvzA56UVNXKLyzcdkuGQMNstuv6gPQJKxyTUxHbcblHIIJsClvJKKLblbcLVFPCM" + "STnxgqIScdeodnnxBCoyOXeo4eABQReoyIoyHIGHFGGHHIISeoRSSTqA4eQRABBC_" + "1235679abcdfghijlmnopqstuvwxyzACDEFGHIJKLMOPQRSUVWX", + "rsvwOPfgabABWXyzwx45blxyuDOXUVzAVWKLBCPQ56EOoyWXLMLVVWvFstghyzQReovwABno" + 
"lvkuzJtDwGrBqA78JKrs4eABuvvwkukllvrBwxbcgqhryI67kt01mnBL6g1bSTGHcdbcblLV" + "rBdeHIuE89hiKL8iyzHRxyIJzAhrpqnxkuakcdefMNxHmwku0akluDlm7889AKuEcm01mwEF" + "qrKL9jFPENktCMMNLMmnHRcmEOEFeoMNmnrsnxCD5fENef09DNRS011bxyoy89eotDDNyIjt" + "oypqHRFGeoopdeEFGHtDFGHI3d23vFktyIklGHHRISRSlvblvFdeENDNtDjt9jpqjttDoyDN" + "EN_045689abcdefghikorstvwxyzABCDGHJKLMOPQTUWX", + "CDBCKLEFyznoDN67LVVWWXNXMNmn34xy2cyzdnBLABAKqAblEOhiUV7hmwcmTUijrBrsdemw" + "cdbcjtdeHIIJVW23ABtD9jqAlmefjtJTENFGPQmnlmbllv12op1btDxyqr78st34blabUVEF" + "QR0a45DNVWGHpq56lvCMHI4ehrUVyIzAFGvFlvkloyeo09FPghcdajdeuEhiwG01yIfgefPQ" + "deOPPQvFcdbcabGQakktlm0aSTGHstHRsCajBC23TUABqAnomnijKU12AKlmoplv34qAzACM" + "uDhrxHqrpqtD4534dnjtKU9jpz3djttDuDuvpqzJdnpzhr5ffp5fpzvFzJnoisFGnx8ihi_" + "01245689abcefghjklmnoqstwxzABCDEGIJKLMNOPQRSTVWX", + "IJfgvwwGwxxHxyPQGQrsOPghvwwGFGEFuEOXyzEOopCDuEENDNstpqKLqrlvJKrshrrBBLcm" + "bcabajijissCCDtDktklcd1b122c564567787hhrrsstktklblabaj9jRSHIwxIJNXvFvwwx" + "xyyIHIGH01mwwxxHGHmwcmmndnoyxynxsCbcxHdnHRqAqr09AKlvrsstkt0aklakgq1bKUbc" + "bllmTUcddeef8ieooy3dopfpcdqASTnxzJisgqLV5fpzklEF8iDNtDnofpbcabpzzJmn5fMW" + "yIFG56jtDNMNxHlmmnnxxHGHFGEFENDNtDkt9j45CMVWijzAKUTUSTISyIyzzAhihrUVqAlv" + "gq34VWrBijajabbccd3d3445566ggqqAAKKLBLrBhrdnMWCM", + "67IJvwpqxynowxcdBCUVbcOXuDFPCMdeeocdghdnpztDabstajishrTU5ffpfggq2334efbc" + "yzabRS7hABpzuvakghvwQRBCku78DNxyzJjtwxCDABzA7hyz89xyVWyInxPQmngqcmISbc78" + "5689mwuEabxHWXuvnx0aopNXvFtDcmHRwGxypqSTbcrB1bBLABopyIoywxAKBCDNxH7h12IS" + "01HRuD4556aj0aSTakrBku01hrtDakQR7hCDfgMWmwlvghjttDCDjtbl0alvwG23fglmCM6g" + "12gq1bqAMWAKakyIUVqAGQgqbl0109TU896g6778vF89FPJTzJ09pzfp01wGVW1bpzzJJTTU" + "_01234579abcdeghijkmnoprstuvwxyzABCDFIJKNOPQRUVWX", + "45HIdeABnoENKLbcmnzAajjtVWcdvFabqrLMgqCDtDstrsrB23IJdnpqkt6ggqqAyzklzAmw" + "lmlvghgq12eooyeozJJTTUUVLVBLBCCMMNDNuDuEEOOPPQGQGHHIyIwGuvCDtDstbcisSTvw" + "uE093dmwTUcmFP7hvFkuOXpqUV78rBop01hrRSnxefde23efxHblSTmnWXakktnxlvvFblst" + "lm1b4eFPbllmmnnoeo4e34232cISVWxyEN5fwxEOOPPQGQwGwxxyoyoppqqAAKKLLMMNOXzA" + "rB7hABQRcmzAzJ0amnuEghfpEOTUJKKLLVUVTUpzkuakfp5fGQuEfg0aEOfppzIJHIsCMWCM" + "wGmwabsC9jMWijQRHRHIIJzJpzfpfgghhiijajabbccmmwwG9j", + "JKUVdednno12abhi01bcEFijTUKLoppqOPIJJKajstmn3dcd2cno8945HIOXIS56akVWBCrB" + "LMWXhrKLBLzAvFFPOPOXNXENuEvwzJrBABqAJTyzoyopzJfpuDpzisklFP0amwcmJKuv67KL" + "78LVCDuDdnvFCDBCwGnxeo7h1b09FG4eFPhr6712IJMWJKxHwxlvrBak5fBLKLcdJKJTJKbl" + "1blvKLLMbluvMNENqAzAvwwG3423sC0avw56ABCM34lvMW89hi45blsCBLzA78bcyIcdvF56" + "deMNLMEOoyeoOP6gefghhiijFPdnvFoyuveooyMNeoyIyzkuefakxyabyzfgzAqAABqAAB_" + "012345689abdeghijkmnopqrtuvwxyzBCEFHIJKLMNOPQSTUVWX", + "hi78UVqrVWRSij23STaj67wxrsKLfg12TU893478lm45mn56UVPQtDLVdeQRIJOXnoabENef" + "DN1bkuOPPQlv09gh6gfpsCstktkllmmnnxxyyzpzpqgqghhrrBcmmwrs23EFABJK2cBLjtuv" + "tDpzwGRSQRcmmw67GQCD4eyILMRSKLzJLMpzopoyBCzAuvvwwxxyyzzAABBCCDvFvwJTlvwx" + "bl4556677hhiijajabbccd3dFP011bHI01MWxyCMGHCMMNENEFFGGHHIIJJKAKqAqrrs9jMW" + "12steohrbcoy455ffppqqrrsstjt9j09011223xylvHIpqxHdnTUGHnxVWOXOPFPFGGHHIIJ" + "JTTUUVVWdnMNLMBLvFlvblbccddeeooppqqrrBBLLMMNENhr89", + "gq45zAktOPSTOXvwrssCUVcdrBABqAwxpqrshrhioprB8ihiijjtstrsLMAKnoxH0956BLTU" + "67dekl2c12010aabcdpqJKdnISHRrBblbccmkt2cvF9jKLvwlvhrstqrJKMNSTHI3445wxDN" + "uEvFGHIJzARSPQop78pqqriseoENEOHI4eOP2334WX8ipzFP1bFGBLrBGQQRrsoy78jtajak" + "stghfgbluERSsCcmijBLeoOXeffgghxyhiijzJ3dLVBLyzlv9juEEFvFlvblabak1bSTCMqr" + "pqqr09rBBLLVoyno2cUVyIsCOPmnxy67PQ0awGVWmwwGGQakPQWXbcOPnxabakabkuuEEOuE" + "bckukt_01245689abcdefghijklmnoqrstuvwxzABCEFHIJLMOPQSTUVWX", + "45PQQR56vwRSnostajLMJK5frsSTisEFktENuDKLyzuvzAFGpzBL8ihi7hlmmn894eBCGHst" + 
"FGwGvwAB23IJJKoyHIIJ09EFcddeopfpef01no34zJmwmnnxxHGHBCrBrs45cdfpDNpztDMW" + "MNENlvisvFfpIJlvabEODNstktFPGQHRLVAKKLLMCMCDuDuEEFFGGHHIIJzJqAgqJTMWsCku" + "AB6gAKqANXEN67pqak56KU67JK45238i4eIJTUeo89nobcopMNEFpq12kunxqrhrzAisnomn" + "cmFGuEkusCCM78676ggqyzzAqAgq6g6778890901122ccmmnnoGHRSMNENEFFGGHxHxyyIIJ" + "JKKLuENXQRAKLVakPQ89787hrBhr7h7889090aakkuuEEOOPPQQRRSSTTUKUAK", + "FGGHrsuDefUVcduvENuEkuktstsCCMWXOXJKbcab5f01qAajVWakHIwxWXMWqrwGvwvFyzOP" + "gqfgfp12GQxyBCuvKLAKABcdzAisrsCMyIIJeoMN09efmwoyNXqrvwnxgh4eyILMtDDNcm8i" + "JTtDNXQRPQwxwGvFGHmwstklktfpTUKL34eoqA9j2cAKlvHIoyIJjtcmwGuDJKyIsCCDtDxy" + "IJGQmn45eoENvwnobl56KLlvvwwGgqEOrsopklstQRabGHLVHIajak45klVWMWnoMNKL0aIS" + "9jDNSTnxdnRStD34STJT3dstJKdn67HIKLxHrsnxqAHIuDabij6guEajakuDCDmnEOBCABBC" + "CDuDkuakajgqlmij_0123458abdefgijkmpqrstuvwxyzACDEFGHIJKLMNOPQRSTUVWX", + "ajHIGHij56klfgnomnBCghTUABopzJpq01efCDlmkt1bklBCABrBqrgqfgfppzmwxyyzdelm" + "hibllvjtnxghvFDNFPtDxyabqAkl0akthr3dAKMWstEFefzJdnNXgqfgfp7hUVcmRSVWPQCD" + "CMMNFGUVklLVEOENopyImwEFLMST8iFGwGghTUOPMWuEBLuDUVakhi45ISfgPQGQkunxwGis" + "mwakblzA4etDABrBcmcdeoSTyz2c121buE34hr2334oydn45rByInoLVab0aab4eOXhiopcm" + "mnMNpzijblxHBLhicmeffgHIghdeDNNXhiVWnxuDHRxHDNoyWXlvnxLVrBtDOX8idnjtdetD" + "MNrseoisoy8iisrs_01256789abcdefghijklmnopqrtvxyABCDEFGHIJKLMNOQRTUWX", + "IJpq23ktmwRSfpST45bcOXcdghabqreodednbccd12qAJKHI5fhifgGHIJzAakKLHIst0alv" + "abABktbcBCQRklakabgqlmku0axHnxEFWX3dMNtD9jENjtEOtDRS9jrBktsCBLMWJKKUTUIJ" + "CMis8i787hhrrBBCst34HRDNtDdnijaj1bLVblabajijisstktlv89FGvFvwisDNmwGHwGop" + "cmnxmnmwcmHIOPxHnxpq23xHktOXOPPQBLST9jGQwGmwrB6g2ccmdn9j898iisst2ctDmwwG" + "GQOPuEOPPQQRHRxHxyyzzAqApqopeoeffgghhrrBBLLVVWWXNXDNtDktkuuEAKfpzJKUpzfp" + "fgzJTUyI5f456goy6734564eABeoKUrBhryIoyyIABAKKUTUSTISyIoyeo4e4556677hhr", + "EF67RSyzghlmklmnpqpzzAABBCCDuDkuktstrsdelmklxyefktST5fstyzENzAfgfpcdwxQR" + "deeoTU34xyisrshryz12nomnmwwxxyMNOX0109KLAKABRSnxKU9j898idnklktjtajabbccm" + "zArBLM23yz12ghxyBLhrnxrBCM3duvUVkuefEFuEuvfgefeoDNtD4eyIoyeodednmnlmlvvF" + "FGGHakktjt1b23bllvxHhiHRghstNXDNNXopqAAK0109hizJ3dJTLMUVLVKLakpznx0ajtIS" + "PQEFakyImnENlmLMMWopkuMNtDAKsCCMfgDNLMQReooytDyIqAMNENuEkukllmmndndeeffg" + "gqqAAKKLIS3dEOEFFGGHHRQRPQIJHIwGvw23MW12uvIJkuak012ccm010aakkuuvvwmwcm2c" + "zJ", + "cdPQabQRfgOPkl67uDCDtDstGHSTghJKoyuvbczJdeJTefrsvw56yIhicduEENisCMajEFjt" + "MWabdekuRS89eoAKuEHIpzkubcgqghhr2c5fzJEOfp09uDVWPQIJuvOPHIWXqAFGuD0aoy5f" + "AK45vw89EN23wxISEFfgstabyI78oyFGakzApzvw09blCDyIfpEOQRCMUV89IS670a9jGHHR" + "BCbcfgSTJTmnMWdnkuuDmw5fzJuvIJRSpzGQuErsxHnxfpLMMNEOABstISCMMWIJzJwGGQku" + "zAab5f56mwIJxH5fsCQRcmfpisOPbcPQpzQRjtajHRij9j89HIgqtDisDNrssCBCIJ1bzJpz" + "fp5fBLrBtDBLab67LV45NX3423_" + "02345678abcdefghijlnopqrstuwyzCDEFGHJKMNOPQRSTWX", + "KL01bcWXJKdewxuEEFFGGQPQOPcdMNcmmwsCablvEN8iBCLVKLKUktklisbcnooyxyxHHIIJ" + "JKAKqAgqfgefdecdcm09akopHRsC7hlvCM78QRRSBLstISktIJ1bvF67MW01wGlmmwKUstwG" + "blwxMWVWLVTUlvzJfphrFP121bvFrBrssCBLFPvw23cm2cwx34jtisaj8iHIyIxyMNpzrBBL" + "abHRbc01cmqrijajUVaboyghzJeomnnxLVpqfgxHrsLMxylmHRAKRSMNkltD9jstwx4ersNX" + "uEhrSTDNtDktkuuEhiijajopgh7hJTpzuvDNakNXdepqqAefDNhideij9j09AK01uDhrkl3d" + "uvdnvwwxlmKLrBBLxymnrBhrmwwGmw_" + "01234789abcdefgijmnopqstuvwxyABCEFGHIJKLMNOPQRSUVWX", + "QRktghijbcCDBCabABefkl0astzAlmFGyzdeGHSTEFcmmndnENnxnooyDNtD7hhi8irssCjt" + "JK5fRSktklTUeoopfp4elvUV3dvFwxKLIJCDyIISoyxyFPOXkuuE2cstVWEOblMWWXNXHRwG" + "GHxHFGHRmwdeGHuvuDCDBCABzAyzyIHIGHwGlvDNBCrBqrpqfpefdecdbcblklkttD67wxrs" + "1bqAakghxyCDCMLMKLAKzAyzxywxvwuvfgjtKUGQJTUVdnKUEFlmwGpzno9jzJghAKpzisqr" + 
"fpopsCrskuGQFGGHxHisrsqrpqopnonxxHGHFGEFuEkuakajqAdn34gq8909015645343ddn" + "mnlmbl1b01098978678iisDNCDuDuvvFQRuDCDsCis8i78676ggqqAAKJKIJISRSQRPQFPvF" + "KUUVVWTUUVVWTU", + }; + + solutions["IBM q27 Montreal"] = { + "1:24:3:5:7:8:7:a:9b::c:e:df:e:g:i:j:i:l:km::n:p:o:p:q", + "gjcdpqlnbeilmpdefiegacjmcfbegjopno7aln47acilcd7a14cf471214_3bcdjmnq", + "cd7aac58be14cd47eg35237a8b1412beop47decdcf23fibe01ilmpaccfln_1578abdo", + "jkno35895889opeg017ampac58gjeg12jmde23cdmp35588b58acop7a4723_03579ekn", + "jmcdjkdebe678bmpeggjegbe8b7aac58jmmpcdpqde3589mp5835cdcfdefihi_3689bcjk", + "jkgjjm12231412lnilfi47eg14gjegac7ajkac47cfde7acdeg14ac7adebede12_" + "23cjkmnp", + "35jmgjmp8b23il7a58acbe358bcdeg7a58begjegjm8bmpjmgjopjmnode89mppqjm_" + "2378almp", + "pq2347cdeg14ln12noacopnoil7alngjdeegjmpqfiil47cffimp35cdgjcf58acjmdeopmp" + "8bbe_27cdelnq", + "be1223de89677aac14gjcdjkjmacdeeg01gj5847147aac6747egcffi7ailaccffiillnde" + "cdilcf_01269bgm", + "47ln7acd5835mpil018b58fi23cf671235acfiil7a23jmlncdnogj35deeggj23ac58cdbe" + "8b89beegde_0468bdnp", + "opfijkjmegcfcdmpbedegjopjmil8b58eggjbedenojk8bcd35ac7a588bbe35delnnocdln" + "cffiopmphi_38fgikmo", + "0189cfac147a478bacln7a14cdilacde1267nocfbeln7afiillnnoopegacgjcdcfacjmfi" + "de7acf4714cd01_0679fnoq", + "accdbeil47147a47cfac8bcd12pq7a14ficf12opno4758ac35cffilnillnacfi7acfac7a" + "cf471401fiillnnoln_124cdelq", + "12deacfi35cd58238b4735147ahiac6747cfdeegillnopficfbe12ac8b7agjillnjkegno" + "58ln47gj89accf01fi14jmcf35ac23_12345aceghilo", + "hi6789op2335be12no2314fi470167cf144714mp7a12illnhiegjmopmp14cdacdecfbegj" + "8b47bejmeggj14egbe018b5835588b_0234569bhlnop", + "89mpdecdeg8bcffidecdjmhibe14cfopgj47cdegdebe148bbede5889cdfijmcfil231214" + "accf7a234714fiil67hi122312_0123459acdefhilmpq", + "jk7a89gj23jmln47beacil01lnmp7ano12oplnilcffi01egno8bcfdeac58237a358bcd58" + "8bacilbe7aeg58477a35lnjmacnolnil_03479cegikmno", + "12beln01noil2312filnop358bde2358be35mp8beg58cfgjacilno8bhifi7alnnoopbe23" + "cf12ilcdjmmpegbede8bbe588bbeegcfjk_0148befhijlnp", + "gjjkhi14accd8begcfdeegcdfi0147deilpqnogjmpegacjkcf7aacgjopcfficfmplncdil" + "7a47decdde147a12filnac7a47no7acfop_148afhjklmnop", + "897apqdecdacegbe8blnnofiopnode47cdbemp35ln23128beg2312gjegjmdebe01jkcdil" + "cfmp14pqmpac477a4714acficddeeggjcdeg_01579abcdilmq", + "acbegjop5801358b5889jmcddeno234714hiegbe127a47de8bcd2314cffibeaccdcflnde" + "cd35eggjil7acfegfi23hi471412011247de23jk7a_0234589aefghp", + "7ajkilaceg352367cd8b35opgjpqcf47acdeegfi587anobe8bac14cfjmilficd58opln47" + "35benocf1214illnil01fi2335cfac58noop23no_2456789adfgklmpq", + "01ln8bjkbe23pqgjcfjmilopno7a67mpeggjln14jmilopbejkac7acdfimp35cfhideac7a" + "eg58il8bbe47opnoop7aeg89accdac14gjde7a01egdepq_02678fhiklmoq", + "67be358bdebe235812cfopegac8b35mpjmjkbegj89jm8b58op7a142347673514mp8901cd" + "no1458477alnbeegbeacopcf8bacbefieggjilegbelnil_01235689befmo", + "bejkjmlnfimpno8bde47op01125823897abecfpqaceg3567mpcd8b58gjbe89jmeg35de7a" + "jkjmillnficfacilficd477aac14cf7a47127a14ac017a477a_04689beikmnoq", + "67bejm4714cf01hi12mpdecdcf8bacln7abedecd47gjfiopac2335decf587aachi8b8914" + "no8bjmbe7afi4767il7aegmpjmdejkcfjmfiopilpqlnilno_012468abdefhlmop", + "acopbe67no588b7agjhimppqjmcd35debeegmpgjjmdefieg2358opnobegjac8b58cdaceg" + "be7ajkeg8b35mpjm8958lndegj35eg8bgjjkgjmpegdecfcd_023568aeghjkmnoq", + "cf14581201cdfiegac3514cf7afiilfibelncd47cffide238b7a58ileg8bop35noacjmln" + "2312mpfi01cffi7a14ac47op7anocdcf12illn4714fiilhifi47cf_0458bcdgijlnp", + "no7a678b58mpgj01il1247eg35bejmgj147aegjmmpopbeac23cdln8bjm7a58nogj4735mp" + 
"de6714ficfegbejmgj01decdfi8bmpilpq23eg58hicfbeegficf_012345678efgjklnop", + "fidelnegbe8bgj01127acdcfde01cdjkegde4735nocflngj23accffiegilcfopcddehipq" + "beacgj8bop7acd898begacjkbecddegjjm358bcdcf67ficfcdeg7a_" + "0145789begijklmnop", + "acpqcf017a89cdmp4758acjk12fi1435deilegjmno7a89gjegcf47cd14gj23op58mpfi8b" + "jm12cfjk3558be8beggjjmegfi01dempcdbede12opbeac588b35bedecd_" + "0234579cdefiklnq", + "ac6701hino7agj35jm23opln8bmpacileggj35benoeggjln58jk35no8b584712fipqbe23" + "cf7ailhigjegdeopficdbeilgjlnac35cf4714jm12mpgjegopbeeggj8bmp_" + "12456bceghjkmnop", + "op477anode14ilegfi8bgj67acbe5835jk8bcfeggj01ln7ailde23jm35mpacficf477ail" + "cd47acfi122335cf7acd1423deil58op35ln89noeg8b23be358bil58gjbe_" + "0134689begjklmpq", + "8bcd7ampacjm47be7aop124714fi4735debemp5835678b23gj12beegjmjkbe8bgjjm588b" + "beegbe8b3501588b14be47357aegdecdac47de14cffibe018b89il8bbelnde_" + "013678abcdefikop", + "8blnnoil47ac58eggjdelnbefi8b3523cdde35il58becdcfeg8bcdde14begj7a89cdac58" + "egjmmpgj8bfi35pq23jm127acffi584789ac7aac6723cfilficfillnnoopnoil_" + "0124589abcgjklno", + "ac5847be7a12deop8b14mp3558cd23egil12be478bcfno35fi140147decdbecf8bpqopln" + "89eg8b23deacilgjbeeg8bdecd14677aacfi47gjnojm14cflnmphifiopiljk7aacjmmpop" + "jkln58finocffihi", + "jk8bop89mpaccd12237adeac7a477a01cd12beegpqildegjln35jk8baccdegfidecdacil" + "7afi471447cf6723ac35017aac47cffi1447583512il8blnbeeggjegbeno8bjmmppqmpjm" + "_0146789cdegklnoq", + "7aac47cf8914pqln7amp01jm58finocdac35cfbe47cdilopfilndeil7ampgjjm148bhiac" + "ficfcdcfegfiln233547be7adeno588blnbeac12cdeg35584723gj148bjmac12gj476747" + "3523_012479dehilnq", + "8b5835hi12jmopcfcdmpfipq14iloplnde4723gj01cfficfnoacil7acdde12egbeacgjde" + "237ajk14fijmjkcfac677a3547lnfi01148b14477aaccddebe8b583523illn_" + "0123456789abcdefghijklmnopq", + "12jmlnil897anodecd8b58be478b14aclncdhide89mp8b357ajmgj58bepq8bdejmegbe8b" + "12cd58fiaccfnoacopegmp47233523finoilln7ail471214gjfijk1214noacopcfacgjpq" + "cfno_234579acdhilmopq", + "jmhi89becf67fi12mpgjjmcdcfde7aaccdgjopilde14mpfiegbe8bpqgjbeegjkln7agjjk" + "ilegbeopcf4712no1423ln01jm8b584735gjeg8bbefieghi58238bilfigjcf5835lnac7a" + "jm67_24689bcdefhkmopq", + "1223acgjlndecdjmmpde35ilnobehi14opln8bgjfi12hi23iljk477a47deaccfficd7acf" + "debeaceg1489fiilgj8b677a8901pq47bejmmpeg1258bepq8bjk5835be23eggjegjmmpjm" + "_1235689abcdefghkmnopq", + "pq478bgjcfac14cffi7a35ilcffijkhiaccfmpeg23477agj47jmfibe1435hi238bln4767" + "becdac58cfjkmpegilde8bfilnbe8bac7a58no47il14lnilcd1214ac477aaccf67acno7a" + "acfi_124568afghijklmnoq", + "1423op01mphifinocfilac7afi47cfgjacfiopde587aeglngj35il1447be8914nocf1223" + "8bfijmcfaclnmpjk58cd7a8b67cfopgjnopqcddeeggjjmmpopnolnilfiaccdacdepqbeeg" + "jmgjjmeg7abe67cd8bac89cd", + "12egnojm47opac7acd47beac8b8901mp23degjcffiegjmbe147a477a14358b12accfmp7a" + "ac1447fide7abe67hiilacficdgjjmpq8baccfgj7alnaccd14eggj1223fijkde7a47il12" + "gj1447_0134689acdefgilmno", + "acfihi7acfjmcdmp23fi4712ac67ilgjlnjmpqde147abe89egcd35de8bopmp23cfaceg47" + "cdcfjmficf677agj478914accf7abede35fi47egjmilfi58becfaccf2312140147lnfi7a" + "no_135679abcefghijklmnopq", + "de471412147agj47no14ac017abecf89cdilde35acfi58cf357a23ac3547opjmmpfigj8b" + "7aacbecd1447cfeg147agjiljmfi12lnmpopcfjkac7acf677afiilficfac8b58cf897afi" + "hificf_0234679abcdejlmnpq", + "2335fiac14jmcf12jkegmpil7aopjmgj47ac14bempde8bcdcfficfbecdlndebe89cdacfi" + "5823no7a471412il2335588bfibeegac89decdac147ade2347cf67ac14egcffi12higj01" + "jk12gj23_1234579bcdegijklnp", + "acdejmcffimp7ajm67eghijkfiac7a01gj8947ac7acf478b58fibepqde358bbeilcddebe" + 
"fi14cf5847lnnofiacopnoop8begilfi7acf47deac891412cd7a4714deeg7a12accf23fi" + "127a477a_034569cdfhijklmnpq", + "cfnodeficdjm8beg674758dempcdcfbegjjm14opmp01cdfiegbe8bdehiil357a58jkln12" + "47gjfibede148b122389cdcfac7afiachicdbeilpqfijm47de12cdcffiil14mp12gjegbe" + "gjjmgjeg8bbeeggjjmmpacpq7a8b", + "fiegbe8b01opac127a677ajmilcf23mpgj58eglncdnofi35acdejmop7ahibecd47pq7acf" + "mpjm14pqcdgjegjkde8bcdacjmcffimpilcf01cdbecffiillnnoopmpjmgjegde7a478b67" + "_0123456789abcdefghijklmnopq", + "8b58befieggjhilnjm35128bmpilde588923eg14gjbe124767jm35egjk58pqjmopnobe8b" + "2335bede58gjcdjmgj8914122301acfidebempil128b89pq8bbedecd35decfegdecddebe" + "8b588bbede_23589abcdefghjmnpq", + "pqbe8bilfi677agjln58cfacmpcfdebeilfijm47eg14gjcfjk7aacdegj12cddeno35hi8b" + "egbe8b5835pqegcf4789gj14017alnopaccdde8beggjjmgjegbe8b89ilfidecdcffiacil" + "7a47ln14_13456789abcdehijklmnoq", + "ilopmpcfde35egac7a2358ln3558nocd8bgj4714be12iljmlncf89opficfhiegacpq8b01" + "23gj7aaccfil58debe35ficfac7aeggj47jk14ln01beno127acdac7a677acdacac7a4714" + "122335588bbedeeg89mpgjjmgjmpegde", + "mpopjm23mp14ficf01587aegilcdlnnoopgjdeacfiilbeegcd8b67begjcf7aacde8b47cd" + "cfjm35gj12fieg1458be7a4789ac1223cfhifi12ln14cfpqcdnodecdac7a471412233558" + "8bopnoeglnilacfipqcffiacgjjk89hi", + "23be1447eg8b5835fi12nocdacdegjegcdbejm7a23ac8b7ail583567opgjlncffidebe7a" + "egac8bilmpcffigj58jm89lnmp7aopno01cdopbejkcfilpq471435ln8bopnoln01il14fi" + "4767cfbecdde_012456789bcdfgijlmnop", + "jmcd7adegj47egbeil58ac7aaccfcd12ac146723jk47mp127aacpqjmcfmpfiopjm14eggj" + "3547mpjmcfgj2301eg12jk1423mpbe35il588bbenoeg478914gj67jkgjlneghidecdcfno" + "be01lnfi8bbe_0124678abcdefghjklmop", + "hiacjkgj8bopcd7a5847debe14noac8b7a89ileg4701be2314pq6747cdac12mplndeficd" + "14jkil358bgj12dejm23opcf47mpjmacgjegbe7aac583547fiilfieg141214gj01jkgjeg" + "lnde8bbedebe_0123456789abceijklmopq", + "89jmegmppqgj35fiegbeiljm12mpln47148bhiaccfdenofigjcd23eg35477ailcfachiln" + "jkbeegil67cf018bjm7apqacgj1223egfiildebe58cdac14jk7a35ln8b23bemp0147deeg" + "89gjeg140114de_0125679abcdehiklmnopq", + "12cdcf89hi8bfibe8bdeac5823egcd358b12jmbede14op7agjlnjmmp47cfbe58acil1401" + "nopq8bfi12opcfil89be7aeglnno2335ac23pqfiillnfi7anocfilfioppq471447il7aop" + "accf7a67decd7aaccdde_0134579acdehmnpq", + "67cfaccdde7acf8912fi47opegcdgjnojmbeegmpac8bcfcdop5814gj897a0147jmde23mp" + "acbe12cddeegfipqacgjilfiln8bjm89jk678bbe8b7a35582347eg358bbe23148b47588b" + "7agj471412jk23eg35_234689cdefgijkmnoq", + "gjeg47mp14jm23behilngj35jk7a8b67egbeac8bcd588b0112decdde47beilcfmp8bfi14" + "opeg47pqhi2335898bbeegmpgjjmgjilegdecddebe8bmp5835238b126701beeggjjmgjeg" + "be23358b583523_0123456789bceghjklnopq", + "8bbe12opcdjm58il67ac358b5801egcf14de23becd8b7alnpqno89fi1223egde3547lnbe" + "8bcfgjopegdecdbeacjmjkgjnoegilcddempegjmlnfi7acdcfpqmpjmfiacgjegilcfhifi" + "cfbeacil8b58lnno_013456789bcdefgiklmpq", + "14mpbe7ano8bde58gjficf4723cdopegdebe8b35lncdacnojmil67mp478912lngjpqfimp" + "pqop14jkegbejmgjeg477amp8b58ilgjjmopbe8bcd23mpdegjcd675835be23opeggjnomp" + "jmgjmpcfpqfimpegbeilficf_2456789abdegijlmoq", + "cf7aficdil12ac14471214mp23gjficf89deeg12pqfi35ilno1447acln58be8923122301" + "7aopaccdilgjcfficf351447acil147aacnoopnohicfficfpq67cdjmilgj7alnacdecdcf" + "deilficfac7aeggjjkjm_023456789abdefhijlmopq", + "be23cd12de35cdmpjm8b14opde01accfeg2358il47ac14depq7aac12gj4735no147acfac" + "cdfideop23aceggjmp7ajm47begj67hifiopln8b12nocf0112ln4758opfi3523decd8935" + "decfilfiilfibe8bbecf58_0123578bcefghijlmnopq", + "gj35cffi8b58hiegopiljkbe3547cd7acffide8bbegj23aclnilcdde67eglngjfijm7a12" + 
"mp23depqgj35cfacficddeegdedebe8b5835231214477aac8912ilno2335lnilgj8bbede" + "debe8b5835231214477aac0114fi01jmcf89mpfi4767", + "cdjknoiloplncffideilgjnobehiac23cd58jmdebe01mp1447ln357aopno2347accf8b7a" + "ac67897acdfi5812cfacpq473514hi23cd581214ilficf47ac89fiil67ln7a4714011447" + "beeggjeg7ajmbemppqmpjm_0245689abcdefghjklnopq", + "58ac7aeg47cf8bopjmmpjkgjbefidecdcfac355814jm1223mpilfilnno7a8b47beopeggj" + "35de1467jmcdpqilcfachi7a67mp47lnfiaccdegac7a4714122335588bbedecfcdjm01eg" + "debe8935122335588bbegj12eg89degjcdaccffiilhiln", + "acgj8b7acdcf47lnnoilop58be358b23egac8901jkfilnpqcfgjnodecd14ilaclnegfide" + "cf7aac7ajmhi8bcfbe58cdfiopac477apq8bacnoil58lncd67denocfegac7aac47cf1412" + "234712figjjmhiegbe8bbeeg_03479abdefghijklmnopq", + "jklnilac7a12bejmficfillndeac4714hi47nompegln01gj7ail58148b47bejm01egln58" + "ac8bnoopfi35cfacgj7a23fi89ac58becdegde35gjcdhi14no67acln8b127a01ac1467no" + "58cddeeggjeg35jkde588bcdac_0123567abcdefghkmnopq", + "opcdbeaceg7alnbe6723degjeg8b35bejmdepqcdgjjkac12mppq237adecf35588bbeeg89" + "decddebe35accf7afi4714017ahiacficf4714122335588bbedecdac7adefiilgj352314" + "acficfegdecddebebedecdac7a471412233558fi01ilgjln", + "89il35fi67benoopjmeg23cf1458acmpgjeg7ajkcdgj8bacbe7a3558ln47jmno14hi8b35" + "be89deeg01beln12fiillncd8bbeopdecdeg2335noopcfpqcddejklnfi14be588bbeeghi" + "477agjjmilgjeghide01cdmpcfil_0123569abcdefhijklmnp", + "jmilln1423be4712de3514no588b23cdbe35cfegcd01gjfi89deeg8bcdgj7aac7acf8967" + "fiilfi1223cfln12cdnobe7adecdaccd7aop67pqmppqdeegjmjkgjeg47be8b7a5835decd" + "ac7a4714122335588bcffi_0123456789abcdefghijklmnopq", + "no8bacmp23017ail6735ln12becf898bac23cdfi35op7a015835cfhicddejmilgjbeno12" + "47acfilnegcd8bopjk14begjnoegmpgjopdejmjkgjegdecdcfnopqfiac7aac8b583558cd" + "8bmpdeeggj47jmilgjegdehicdac_023456789abcdhiklmnop", + "01no14lnbe8bcdopfiilde12ac8947gjbejm7acdeg47be8bdehibegjegcfmp8bopacfiln" + "de89gjnojmcf14fipq58358bjklnmpcd7aacbe23egjm477agj67egde14behijkcdac128b" + "588bbede14be8b58352347141214_0345689abcdfghijklmnoq", + "ln8b125823cdbe35noopcffiil47pqaclncffi8bil7a47gj14eglncdde01benocdgjcf8b" + "47jk89ac7aln47fi588bmpcfopeghifigjegaccffibe8bilfi14lnno5835cdlneggjcffi" + "illnnoopmpjmgjegde2312ac_0123456789abcdefghijklmnopq", + "accdhi0135opildegjjmcfpqbe7aac14eggj237a8bbe12cd5835mp47jm8bcf23no5835de" + "cdop89nompficf7aachificfcddeeg7aln14gj01jk14678bdecdac7aaccddebe8begpqgj" + "jm47jkjmdecdac7aaccddeegbe8bbe_02345789bcdefghjklmnpq", + "7a47jk35cfgjfiaccdcf12il14fihi8b7a58dejm47cd7a23acdeeg890135cf14befi1223" + "7a8bmpgj47cf7adeilcdcfde58acbecfoplneggjiljmficfmpac7a8bjmbe47ilgjlneggj" + "jm8b58no35ln14476758mpil8b1247be23_02345789abcdehijklmop", + "opegjk35acbecd7amp14de8bcdacnoln1247opcdpqbejmgjegno8bbe897aop8b14bede58" + "mpcd8bcfjmgjac23cdgjegdecdcffiillnnoopmp7abehicfgjcd47fiillnnoopmpjmgjeg" + "decdgj35jmbehiilcfcdde8blnnoopmpjmgjegdecdcffi018914016747", + "egbe8blnjmcfmp58gj35ac23finoegcfjkfigjcdde12opjmbe8bde8958illncf35hi23fi" + "12cfno4701mp7a1467ac12opcdde7abe8begil47gjbe8blnpq58358bbefiegjmjk2367gj" + "noopcfnoac7aln47be8b5835231214477aaccffiillnnoopmpjmgj89hi01", + "figjcd8bbe89acegil7acffi47ac14lngj7a4758nodeillnac8bcdop14cfno017a35jm14" + "ac7a4714begjpqmpeggjbejmdepq127a588begjkjmmpnoopmpjmgjegdecdcffiilaccffi" + "ac7ajk67143523ilnoln47no14122335_0123456789abcdefghijklmnopq", + "acopfimpcd14de1235eg7a67opcfac7agj47cdcfdejmfihiil14egacnocfop01cdmpdefi" + "14gj23cfacopbe7a47ilegcdgj14accd7aac01bede8b89ln58cdjmgjeg8bbeaceggjjk8b" + "nompgjopno58egbe8bbeopeg5835mp23gj3558_01234678bcdegijklmnopq", + 
"be8beggjdecdmpeg14be3501cfjmfijkmpgjjm58pq8bnohiopmpjmgjegnobepqil233558" + "8bbedecdac7a4714be5823ac7a4714122335588bbedecfcdfibe8958352312ac7a471412" + "2335588bbede89il7acfaclncf7ailfiil_0123456789abcdefghijklmnopq", + "deno6747egjm7agjac142312cfcddebefi4714jkdecf7acddebe8bhiil47fi58cfmp35eg" + "hi23be8beg58bedefiln35il2312cd01fijm14mpcfgjac477aacopmpcffiilnoln01cfcd" + "deeggjjmmpopnolnilbe478b8923583523_0123456789abcdefghijklmnopq", + "47cdfidecffi14egilaccdlndefiop12cfcddegj7ajkfiacpqbe477a8bcfegdegjhijkfi" + "cf14acjmcddebeegbe8b587agj8beg35dejmac47237aacmpilcfjmficf67cdde12benode" + "hicd8bcffilnilfi89587a35cfno01accdde89eggj237ajm14124714014767mppq", + "debe23128b7acd89gjmplncf5835eg47figjjmjkbe14hicd23decd8bgjbeeg58cfdefi35" + "8bcdcffi23deachiilgjjm7a4714egmpcfjmop01gjegficf12ilbe58ac89lnno23587a47" + "opnocf14fiil3523588bfi125814cfaccf47fiillnilficf_0123456789abcefghijknp", + "147aaccdcfdecdde477abe12ac148bjmjkdecdcf47de23fi126701cf14ilmpjmlngjcd89" + "egjmdecd477abe8bbe8b5835231214477aaccffiillnnoopmpjmgj8bdecfcddecfbefieg" + "gj12cfjmac23pqopno7ahilnil4735231214477aaccffiillnnoopmpjmgjegbe8b89pq0" + "1", + "egde58gj35cdbehicffideacbe8bjkcdegdeiljmlnegcd47897agjbenoac67opegcfdemp" + "4714fiac7a23il58aclncdcfcdde12cf47fiil35be8bjmln01588bgjnolnopcfil14ac47" + "7abeegacbecddegj23jm47mp8bjmgj12egdecdcf14fiil58ln01no_" + "0234568abcdefhjklmnop", + "cfeg23jmmpjk1412ac357a234714beegaccdnogjjmac7aeglnbe8b47defino01egaccfde" + "ficdgjacopbe14de7a5867cdil35lncffijmcf89ac23mpcf7a1247ilfihinocfpqcd35de" + "jmegacdegjopcdjmacdeno7aeggjjkgjegaccddeegcdgjjmgjaceg7abe_" + "01234678abcgijklop", + "beeggj8b7a58opacbejkmpcf478b3514eglndecdgjbe7aegac0158cffiil67de7acf47cd" + "ac89jmgjpqmp3514jmnogjeg6747debe7aopmpnocdde14122335588bbedecdac7a352314" + "cf47ficf7acddebeac7a4714122335588bbede89pq01cffiil_" + "0123456789abcdefghijklmnopq", + "pq588bcffi35gjjmbeacegnohiopcd58cf8b7afiilbegjeg8bde12lnmpcdac47cf14nofi" + "pqdebe01de89jmcd58ac7acfilmpjm8b4714cdopde67lnbe01decdcf35jkfiaccfacilln" + "nofi7a23jmmpcf4712ac7a14cf35jmopfi6712accddeeggjjkegdecdac_" + "023456789acfghijklmnoq", + "gj1223cd58jkde01be47eggjbecf14jmmp8begbecddecd89ac7acfac8bbedeficfac7acd" + "ilfi47cf7aacopnolnilficfcddeeggjjm14mpegbe7a8b5867013512147aacfi8bbeeggj" + "jmmpopnolnilficfac7a471412233567jkpqfioppqilnolnnoilfi_" + "0123456789abcdefghijklmnopq", + "3512141223beildecd8bcfgjjmegacbedegj8bmpegficfnoln5801128baccd7a47opjmpq" + "mpfiilcffi35hi67cfdebe8bgjjmfi58accffi23ilcdeg7anoln14deno47beac35898bbe" + "cdac7aeg58lngjiljk8b67hideeggjde12mpbeegpqmp14jmbe018b583558gj_" + "023456789abcdeghklmopq", + "bede8bcdmpeg14cffipqbegjde89cdilcffiegln8bbedecd588b12bede8bcd23jkaccfac" + "7afihijmil35aclncd35588bbedecdac7a471412gjegbeficf01no7a8bac7acf67fi5835" + "7aaccddecdac7a4714122335588b7a01lnacilcfegficflngjopjkacno7a67op477aaccf" + "filnilficflnac7a47", + "fimpgjcd58hicfjmdecdaceg7a4735gjbede1489cd58ac7afiegde23cd35egjkgjeg8bbe" + "58eggjopmpjmgj8begbedehi8bbedecdac7a4714122335147a58ilcddeeggjopmpcffiil" + "lnnoopmpjmgjegdeaccf7a47146712233547144712233558ac7a470114477a0189ac_" + "0123456789abcdefghijklmnopq", + "gj4714jmfibemp1258il7agj8baccddeegbe67eg7a67gjcf23355889jmmpopfimplncdgj" + "47nodeegcfac7agjhifi47jkgj144712il23cf588bbedecdac7a4714122347egcd7afide" + "ileggjjmgjegbe5835ln23cdac7a47cf140114477a122335588bbeeggjjmmpopnolnilfi" + "cfac7a47pqoppqnolncfilfiillnnocf", + }; + + solutions["IBM q7 Jakarta"] = { + "1:23::5:5:6", + "563501_06", + "131245_345", + "12350145_23", + "123545_0123", + "13354556_14", + "45351213_14", + 
"56453513_45", + "120113455635", + "45130112_015", + "45561235_1345", + "01563545_01236", + "13124501_02356", + "3556351345_346", + "45355635131201", + "56134535130112", + "124513355613_24", + "1335121301_0235", + "1345355635_0156", + "0113125635_12356", + "1312354556130135", + "350113014556_045", + "351301563545_013", + "3545135612_13456", + "4556133513120156", + "01134535451301_04", + "013512133556_0235", + "013513351245_0125", + "013556453513_1345", + "124513355613_0234", + "12563513350156_26", + "130135133545_0145", + "13351256133545_12", + "564535134501_2456", + "121335014513563545", + "12133513121345_235", + "13013556133512_012", + "133513561201_12356", + "13563513451201_016", + "135645563501_01346", + "351312560135134501", + "35561213123556_256", + "355612133545_01456", + "4501133556_0123456", + "564501131235_01346", + "12561335130145_0126", + "35451256011335_2356", + "56011335121301_0356", + "56123545130113_2356", + "56351301453545_0146", + "01131235451356123556", + "01133545121356_02345", + "12354513354513_23456", + "12564501133513_01236", + "1301351335563513_015", + "13123501451335561345", + "135601351356_0123456", + "13560145351345_03456", + "3513124535130113_045", + "35561235453545_12345", + "35563513121335_13456", + "56013512134535_12346", + "56351245130135133545", + "1312451335561301_1245", + "3513350156451335_0145", + "3513563501133512_1356", + "3556451335131213_1345", + "4513351256131235_0124", + "12014513354501_0123456", + "13123545135635_0123456", + "13350145133513_0123456", + "1345351213350145_12345", + "3501451335561312351335", + "351335120113355635_015", + "56351312453545_0123456", + "121335015635134512_0246", + "131235451335563501133545", + "1312453501133501_0123456", + "354513350145135635131256", + "3545355601133556_0123456", + "13354501131235560135_0124", + "45351301135635131213_1456", + "01131256351335124535_01236", + "13121356351345350113354501", + "0113354556131235130135_02346", + "0135134535561301351213123545", + "0156131235134535130113_02346", + "13354556133512131235_0123456", + "1335560145131235134535_01245", + "011335124513351356011335_03456", + "560135131245351356351312_02346", + "12133545560113351201130113_02356", + "35451335134512133556351312_12345", + }; + + solutions["Reduced IBM Manhattan q65 (only q51)"] = { + "1a:2:3:4:5b:6:7:8:9c::d:h:l:e:f:go:h:i:j:kp:l:m:n:q:t:x:B:sC:t:u:v:wD:x:" + "y:z:AE:B::F:J:N:G:H:I:J:K:L:M:N:O:P", + "rCjpFGrsGHpxHI8cstwxot78tu67uvfo5645vDvwefuvdead0a01_cjoCF", + "zEmnENnqABqByzxyzAABwxyzxyMNzAzEvwwxuvvwvDLMtuKLotJKfo_mzBEN", + "4bbhhi1223ijIJ454b01bhjphi344556DJpx1223344bijjpbhhiijjppxxypxjpyzzA_" + "0145I", + "fgmn67ghnqIJqBDJABhivD78IJij8czAzEuvcltujpotklfoENMNefdepxxyyzzEENNOOP_" + "6fmIJ", + "klrC89MNjkrs8cENzEcljpzAstpxtuuvklvwwxjkABpxjpjkkllmpxxyyzzEvDDJIJHIGH_" + "9lwCM", + "8cwx23clmnzAklABqB34nqlm4bbhjkmnvwjppxwxghuvklfgfojklmotstvwrsrCtuvDCFDJ" + "jpFG_28nxz", + "efqBfg45nqmnvDghhi4bbhijzAvwwxzElmhixykljkijklhibhjp4bpxxyyz3423xylm12mn" + "zEENzEMNnqqB_5eABD", + "ad45OPde8c4bclefkljkbhfghighNOjpijpxwxfojphivwMNfgLMKLJKijDJjkvDuvghhikl" + "ijlmjkklcl8c89_58aoP", + "010axyaddeclwxvwyzefuvfoxytu454botwxfostkljkvwefbhrsghdejpfgadpxfoxy0ayz" + "zEotENtuuvvDuvDJIJ_15cyz", + "8cxypxjkrCcljpklcljkstkl8czE89otENGHrsadclstlmkltuOPHIdeuvjkvwwxNOENijhi" + "ghefjkfgOPghfozEpx8cjphiotjkstrsijkljppxlm_8ajmsyzCGP", + "JKIJ01qByzHIijxyhiwxjknqABghzArCrsijothi1223zEENfgfoGHtu34bh4bstklbhvwjk" + "hitujpuvtuvwwxijstxypxjpyzrsrCzApxABCFxyyzpx_0jkloqzBCK", + "ENGHFGotfoJKtuDJbhLMvDCFghijfgzErCfoNOMNjpLMrspxstrsrCMNzAvwotxyKLJKfoIJ" + 
"efotwxABdeqBadHIpxjpGHjk0aklclnq01mnlm8c89cl8c_3bituHKLNO", + "klvwbh78uvwxhipx67cldetuijvDjp8cvwefot780aclst8cklfgjkjpkladclrsrCghpxkl" + "8cwxpxjpvw786778jkklcljkdeeffootfostrsrCCFrCFG_067bdkovwD", + "DJ89mnjpvD8cpxjkGHxyuvklijlmrstujkkljppxclwxjkhiijABklotvwyzstwxghfgxyzE" + "vDhifootstghotDJfoyzIJqBzEnqfgENfozEotstrsstotmnforClmCFcl8c78_" + "9jklnruAGJ", + "IJ67DJjp5645lmCFxyvDefNO56EN4bzE67uvfgklzAjkpxyz78bhrCtuijghothizEfoeffg" + "rsst8cABdeclotqBfofglmnqghbhmn4bnqadxyfgefyzdeqBzEfgENMNzELMot_" + "47efjmxFIO", + "yzGHzAad4bhiijdexyFGwxCFclefjpABpxyzhivwbh454bxyghwxvDbhrCfgzAyzghfoxyfg" + "yzwxrsotghvwwxfoDJtuIJvDotuvHIkljkijsthiotforsrCefCFijdeFGad0a0112_" + "45achizABH", + "CFrCstothi0abhzEgh01yz34rs124bijbhfo4bef45zAfgdexywxstghfomnfgjppx23lmvw" + "5634yzuvtuotuvfostvwclxypxjpwxyzrsotxytu8cij677867rCuvhi5689bhyzzAABvDDJ" + "yz4bqB344bnqJK23_3abghnsAEF", + "JKvwvDNOlmDJyzxyjkpxKLvDxyzEJKyzijFGzAENxyzEvw89kljkjphibhijhiwxpxjpOPbh" + "4bvwNObhijwx34jpENpxuvwxvwwxxyvDhiyzDJIJzEMNDJtuotLMKLxyJKIJHIIJpxJKKLjp" + "ijxybh4bbh4556_9bckmwxzAEFKLOP", + "GH23FGdeklbh8c78LMefxyfg45wx4b34vwmnhibh4bjkKLlmjpijJKghCFDJpxhifgklxyfo" + "jpjkrCbhuvghfgottupx67otyzxyzErsyzfojpstuvijotjkvDDJ56tufopxuvIJENwxNOef" + "45devDklfo34clDJOP2312zEEN_25bcdlnyHM", + "12vDENmnclDJlmJKgh23ijuvnqOPKLjpzEbh34MN784btuyzLM8cMNclpxbhNOwxMNmnkl12" + "hiotij23ghKLvwqBfgeffonqvD01JKOPIJHIGHLMjppxfgxyyzDJghKLIJJKFGwxzEENxy12" + "34HI23IJwxNOGHbh4bCF45HI_1347cgijmuvDENP", + "qBnqjkij12foABNOmnqBMNzAlm45zEkl34OPLMhi23AB56xy45KL34otstzAghwxvwdejkJK" + "efijNOfgEN67klyzzEhibh78foghhi8cclMNvDxyIJyzlmxyLM4b8czEDJKL34EN4b78NOIJ" + "OP23mnpxHI12IJjpijfghibhijfootstforsrCst_134bdfiknqwyBOP", + "zAlm1278adtu67uvsthivDtukl23NOjkfgbhghCF4buvrsEN8c344bjp89DJhiijxypxjp78" + "styzrCzEwxJKrsotxyclvDjkvwwxDJKLLMvD23vwkl12fojkfgstjpgh8cotklclJKpxwxjp" + "vwfovDlmbh4bDJyzvDxyvw45fgbhmnnqghqB56ABhizAzE_167cdfimrstyAFO", + "fo23mn8chiefijlmgh4bde56qBOPefNOfokl344bclzAot45stEN34zEjpABpxzAbhrs23jk" + "89klqBAB7867nqjphijkvwwx56pxjpvwvDjkijqB45mnwx4bnqkllm8945mnpxclvwjpwxbh" + "DJ8cpx89vD8cxyghIJfgfoghlmmnvwyzotzEENvDNODJIJyzvDHIIJGH_" + "2689bcdhinoqvzP", + "GHlm12fg23foJKFG89OPghefNO8cclhijpijlmjprC8cotDJtufostyzotuvENMNefvwstCF" + "rs78zEstENtuuvzAzEAByzfo34vwfgforCwxrsqB4bbhrCstpx67hiijot56hi45jpNOpx34" + "jkfoklclxy23yzghfgOPbhfonq4bzEghmnENlmcl8c34bhmn89cl124bvwMN_" + "19efgmopsyCHKMP", + "hiijJKzA2356gh34fg4b8cjkhi45yznqkllmghbhmnjpDJKLFGJKijclvDnqxyklvwwxOPot" + "hilmpx34GH23jpHIfofgjkxypxyzijvwNOkljp12uvjkxybhij4bzAyzbhDJvDghENzEtucl" + "34bh4b45uvbh01pxotyzxyhituotfoijghotefdeadfgpxef0adejpjk01kl0acl8ccl89_" + "268fghkpqtAFKLP", + "564bot0aENrCABvwCFstjpfozAwxyzrs67fgrCzEpxjkijtu45bhwxnqxystMNghadvwbhde" + "4bjpmnhivD78DJwxLM8cvwKLot45efJKclfoefyzotlmbhfguvfoijdetuklmnhiijvwuv56" + "rsot4bbhvD45jpIJpxHIxywxDJtu4bstjkIJvw34tuijyzuvhixyyzzAghABfgtuotefyzfo" + "deaddeef_0456jkqstvBCEFN", + "8cfgghCFNOOPrCKLENLM0112FG0ayzNOrshi4badzEIJzADJbhENHIdezEefIJstotABclfo" + "uvvDghijDJvw23otfgjpfotugh34zAvDMNwxENklGHmnvwIJqBHIzEpxotjpxyABjkuvwxnq" + "bhlmtuvwzAqBmncluv4bvDjpnqABpxqBklcl458cIJJKcl4byzlmclzAxyDJ8c5689mn8cKL" + "clnqJKlmLMMNyzNOOP_0148fnuzCFHIKOP", + "ad8cotxy78clGHvDhighpxfg34xywxDJvwjpHIuvzEFGGHijIJpxhiHIfotuOPJKjpKLyzuv" + "LMMNbh8cghABwxijfgENxywxvwzENOhi894bpxuvENtuzAyzxyABwxzAqBzEyzefIJxypxde" + "jpijhighpxotMNbh45fgnqyzefghclfoAB4bdezAstyzJKklKLotcl8cNOstefxyadqBIJyz" + "pxrsbhrCstABzE45CF56deLM4567yzMNNO4bMNLMOPcllmmnclnqqBABqBnqmn_" + "4789aefghijoptuvwyzBDEFGLP", + 
"pxnqlmDJ34jp8cHIfgotvwIJmnqBefhigh23CFFGGHijhibhuvkl4bOP34ijtu67JKrCstfg" + "hiKLCFbhrsvDvwrC4bstjprsDJot45cllmghijxyHIfgNOJKnqfoMNotvD4bhijk56wxDJst" + "jpIJefbh4botghpxvwjpfofgLMHIvDijwxotghKLdejpvwwxpxjpad0auvhiijbh4b34bhJK" + "23st34hiijrsDJbhJKjk4b45vDuvbhkltu34otfoeffoot231223tu01340auv01_" + "23578befimpqstwxBCDFGIJKLP", + "ABDJ56clxyyz45ijjppxENfohi0azEijmnxyfgJKfojppxlmkltughKLxyMNbhjkLMyzKLEN" + "78NOefIJxy8cvDjpvwclDJ4bpxzEfglm34otENzAyzefmn01hi12wxOPzEEN23deuvpxij34" + "nqABqBghjppxxy23wxvwwxhiNO45stadABnqbhvD56mnzAENzErs67ENyzxyrCzEwxNOvwij" + "OPwxghpx78jk4bfgABlm89ENuv34clkl8cjkjpzEclijpxghlmqBfonqtu23rs78st120134" + "wx67hivDotmnlmcl8c78cllm_67abceghjnoprtyzBDGIJLMNOP", + "0auvijklwxvD45DJvwmnxy34tu01otwxuvvwyzstfo4bvDMNot89pxzE23rsxyyzwxzAst12" + "JK8cGHhiENbhDJAB23tu0178stjkxy0aklFGjp34IJHIpxvwfgCFjpuvrsJK67GHrCwxzEcl" + "jkFGghbhklfoKL8c4bcl5645NOijpx34jprs8cpxeflm8923MNjkLMfgvwkllmjpyzJKIJxy" + "12wxfopxjpyzxyzE8cotyzHIJKxy45tuENwxvwjkKLvDvwwxJKNOpxOPijjpIJpxwxvwklhi" + "ghuvjkclJKvDtufgzEENDJJKKLDJ_359achilnprstuvxyzDEHJKLMO", + "nqGH01zAIJadqB34bhzE12mnAB89wxOPDJhi4blm0aghnqjkFG4556jpLM45ENrCvDmnMNde" + "xyijyzzAqBnqpxfg78efvwfoxyyzrsCFklottuzEENjpstijhipxghxyefrCaddersJKyzIJ" + "jkHIijNOuvlmfgbhjptuABef4bwxpxfohixyvwstij23otjpuvfgtuGH67453456ghzAjkzE" + "jpbh4bzAhi34pxklqBmnuvfovDstbh45efABrshijpdenqqBjkDJijuvadlmhicl8clm34zA" + "efjpfg23fo454bgh3412234b78px01klJKclkl_013569abgiknqwyzBCEHIKLNOP", + "zEENadABvDOP23yzotbhNO0afgfoghGHotstijefDJtujp45zEmnENxyadfg12yzbh4bFGpx" + "qBlmzACF34otfokldehi23wxef01ijrsrCdexyyzghjpbhfgnq4botvwvDghpxuvsthi34wx" + "xyfg23rsyzDJtuij12adzAJKjpmnwx23zEfootfoghfgvwwxghbhpxhibh4bbhijjpjkhixy" + "8cbhpx45vw34jpAByzxyst23otqB4556yz45foFGefclpxdeklwxvwefjpvDfootjkGHstHI" + "ijDJ4bIJwxjpHIpxGHxyyzzEyzxyklcl8c89kl_01258bdfijnoprstuABCDEHJNP", + "ENklCFjpKL34ijGHadmnJK0a01DJghrsfg4b34OPIJHIlm23devDad8cbhIJrCghLMhifo0a" + "fg01zAMN78otfojkfgefotclvwJKKLpxJK45dejp8cadABNOwxtupxxyghfootyzIJLMzEqB" + "bhfoEN12zAuvMNijstDJ6756HIzEefnqotGHmn4bFGqBjpclfopxJKklfgxyENKLdebh4556" + "zAABzApxJKefyzotqBnqqBjpghHIijtuhibh67uvijjkLMlmvD4bNO34uvmnDJMNNOOP4bfo" + "MNdeotjppxfojpLMtuwx23uvvDDJIJuvDJ_013578abcdfhlnpqrtvxzADFGJKLMNP", + "bhzEABDJvDIJnqefadstqBENpxABdetumnwxzA670avw78ot56jk8cnq4bkl89OP67ijyzDJ" + "4578xyghyzlm23hizEuvjpijjkxykl12fofgghbh4bwxpx56tu4534otwxvDjp23vwpxNOfo" + "uvhimnijwxbhvwENvDpx4bDJvDNOclklghzEjpOP45ijwx6734IJyz56fggh45hibhxyuvhi" + "56ijbhpxtujppxyzotjkkluvfoxyyzzEyzvDDJENMNIJvDef4bHIuvtuxyIJotstpxforsot" + "bhENjpghfgghfotuuvvD67DJvDuvJKtubh788c78_12456789abdeiklnoqswxzAIJP", + "OPLMvwbh4bqBef67vDwxde8clmENNOnqkl780ayz45ijfgclvwlmxyKLhisttufojpmnJKuv" + "ij8cbhadef4bhibh3412wxcl01zEyzghot45xyvwDJzEENfgpxjpjk67wxstuv4bzE89hikl" + "ijENrCturs56rC67560a78cl45adfovDnqothivwghvD67jpst12zA56AB454bpx8cyzCFxy" + "mnzA56pxfgbhjpfoyztujkjpgh89pxbhxy67MNefzEDJde4bENzEfgpxJKfoyzotzEjpfost" + "KLef45ENfo5667NOOP56rsotENstrszE45otforCrs_" + "2345689abcdefgijlmpsuvxzBCDLMNP", + "zENOFGuvtuGHyzwxstpxhi34vwfgrsDJbhij45ghvDHI01DJIJOP4b89mnbhMN5623ghxyjp" + "344b45vweflmijwx4bjkfg56hiENyzbhzA4bxygh34ABvwNODJfopxuvvwbhENyzzA12rCxy" + "tuwx8c23vw12NO34jp4bstpxvD45bhzECFotyzHIvwhifoxyyzzEjppxDJcllmwxjkENyzmn" + "vwkllmefijuvGHjkNOnqJKDJENcltuvwjpmnpxstwxrsstrCdevwadvD0adetuDJuvjpkllm" + "kltuotxyyzstrszEjkklclfoyzstoteffojpvwstwxvwpxkl_" + "024569bdefhiknprstuwxzCDEFGJKOP", + "78FG4bghGHyzstijzExyjpFGhiclotklbhnq56yzrshi8cENvDlmzEpxwxCFxyyz01HINOEN" + "rCKLjkjpijLMpxIJDJjpMN45sthiijzEwxfotu4bzAmnpxfgvwfojk34otefbhst67rsgh4b" + 
"vDCF23st340ayzadbhxyEN56pxOP78yzotfgdekllmjpwx67efNO12gh23hifoDJijENjkfg" + "zEkl01vwcluvxyyz8cxyzAjpwx12vwpxbhABwx4bbhgh78vwjkhixyyzbh34ot4bbhhi8978" + "34xyij23jkuvtuhibh344bbhkluvjkvDhijppxjpjkDJIJDJHIwxvwuv_" + "1457bcehjmpqrswxyzACDEFHKN", + "stDJvw67foMNfgrs4btuENxyIJ0amnGHyzvDlmbh34zAHIFGwxxypxNOkluvijgh56DJxy45" + "yzadrC34GH56vwsttuuvdeotAB0a0167zExywxxyvw78EN8c0afgIJhiyz89HIjpwxtupxfo" + "efotwxjk67jprstuuvvDjkfoadotDJGHJKzAij8cstotxyjkkltucluvrsvwklforCdepx8c" + "wx89jkIJKLjppxot12vwCFxyJKvDijfgghDJyzbhvDfohi23ad34de0abhzE4b34yzvwot23" + "4bbhENIJHItuxypxGHjpxyMNjkkloteffocl8cefHIcljkjpdeaddeefot_" + "0134679dinorsvxyzACDFGIJMO", + "jpjkMNyz6756st12dersENklwxghrCzECFefvwLM01uvtufopxvDjp23MNfgENAB45uvNODJ" + "ijstclwxlmmn78qBtuzA67rspxvwKLjpFG4bJKwx56vDot0avw8cbhnqwxDJjkKLklLMpxfo" + "KL45MNijvDghOPuvJK56rC34qBfglmIJxytu4bghbhsthiCFotijjkjpvwwxNODJ89vw23fo" + "34uvefdetuotpxHIklmn4bwxIJvDadstfo12DJ01yz0almjprsjkstotvwbhjpstcl4bwxde" + "rsfo34rC2334pxefklwxJKfocljkjpvwdevD8cCFjkKLvwwxJKLMclDJIJHI4bGHFGGHHIIJ" + "_12678bcdghijklmnopqrstuwxyzACDEFGJMN", + "pxfo34qBDJefvwotkl4bJKyzOPzA89DJ23KL4578mn56lmdexyad0afoJKABjpwxstijrsot" + "IJ67clyzfghiuvbhrCzAyzvw4b128cxyvDwxJKpxyzjpjkEN01ijHIzEklvwclNOtu78hiyz" + "uvCFghxyDJfgpxMNGHIJjpDJ45ijyz56wxEN34zEhipxjpbhjkfoLM4b4567KLijENHIvDgh" + "otMNklvw56NOjkklFGhiwxstfgghbhENclpxDJ4bfoyzvDIJCFDJrCot2312fgstghrsrC8c" + "rsclxy8901fg45LMFGfopxGHJKMNbhfg0aHINOOPNO01MNstjpLMKLJKpx12wxotvwtuuvwx" + "vDpxuvtuDJ_2345679cefhijlnopqtuvzABDHIKLNP", + "nq4bzAHIfg45clvDKL67fo56mnDJhi3423JKvwuvklwxCFbh8c89otstclLMijjkvwlmgh12" + "45yzhi8cFGxyzEMNadwxtuNO4brsstKLLMrsuvbhyztuENOPvDij4bvwhi78DJkljkclmnkl" + "IJABxy344bwxzEyzHIGHrC0azApxABghxyNO678cJKuvvwcl8cwxfgjkjpyzghefijbhzEvD" + "ENKLpx4bfoJKhideHI89ijMNIJjp56zEJKuvxyotfoKLwx67px342378yzxybhpxrszEENyz" + "vwjpHIzALM34st454bGHij67bhhivDyzvwij56bhNO45FG4b34jkbhkl23efDJIJ12HIIJGH" + "DJdeadde_12346789bcdefghijpqruwxyABCDHIJLMNOP", + "ijpxENABrswx1234jp67zE78lmrCCFNOhixyfgzA67qBpxijMNAByztu8cjpstrs45uvwxot" + "fo01vD2356tuvwwxpxxyENLMjkbhvDotghefdeDJuv0ajpzEvDfg4bstpxvw3478yzhiFGef" + "89footwxbhghfgrCCFHInqijrskljkuvvw4bDJadrC12JK45clstfoqBzAKLot8cLMIJpxhi" + "klnqvDMNijlmjkDJJKvDhideuvgh01CFrsfgjp67clKLmnfoefbh4bfo0anqkl01hidepxtu" + "lmad12rCuvjkxyijjpFG0avDpxrsjpxyCFIJHIIJtuyzDJFGJKijhimnvDnqGHghqBadvwzE" + "FGbhEN01NO4bOPnqxyENmnzE_1245689bcdefghijkmortvwxzABCDFGHJKNO", + "foijwxvwrsrCpxvDwxxy34yzbhvwjpotghCFclKLhikl8cfg4b23cl45wxghdevwstjkklFG" + "ijrsfoLMNOadhiMNefDJbhxygh56deotfotujkstIJadfguvLM4bwxvDrC67DJvw78455667" + "3423KLlmENijuvklzEENjkvDHIotOPvwfowxghtubh0a4buv45ghvDJKIJxy01CFDJbhuvmn" + "JKrszAfg12ghbhrClm56KL4bklstotrsNOfo67bhJKrCefstot01fgIJdeghstadfohirsEN" + "st0abhvDvwpxotyz78xyrCstCFefwxzAfgrC01pxvwABvDrsstzAdeyzjpzEyzad890axyad" + "pxzEjkklcl8cclxyjpklijhibhhiij_23567abdgijkloprtvwxzCDFGHIJKOP", + "OPrClmbhJKkl78cljkNOijghmnzEotENfgfo8ctuMNzA23hiABklghzELM56uvfgclKLJKst" + "4bqBnqtu67jk56ijIJefstbhde45EN89klqBrs8cDJvDzAABvwlmHIhiuv34jk4bmnzEENjp" + "wxadMNstIJxyghfgtu23klyzvwDJijxyzAjpuvstpxENrC34clzEjpxyjkefhighwxzAqBde" + "ijABbhzAqByzhi67ENxyjkklwxnqghvwLMqBvDwx01MNlmpxclijtuotLMjpyzfozAefABst" + "DJrsstGHjkotpxFGIJkl4bzAjpyzxywxtu12px23fo45wxvwwxpxclfg8cefyzjpzE56cl34" + "2312otEN67564bNOklENjkzEyzijbh_0245789abcdefglmnostvwxyzACDEHIJKLMP", + "HIijjpvDgh898cuvtupxstzEDJ780aforsxyotMNJKjkkljp4bfofghistghnq34rCmnpxrs" + "vDKLDJcluvENbhwxfoijvwCF23JKzA4bfgAByzotefghstbhKLjp89defo67rCzA8c45pxfg" + "IJadfoghefhi0aotfgijrsfozEefxy4bbhghhizAklqBjpwxfgABtuijjpzApxjkDJlm45jp" + 
"dejkfoclvDhinqstvwFG78otvD8cforsrCeffgklstLMdeHIqBrsCFDJrCghrs34sthiyzij" + "zEyzclkljkot4bfoxyefGHclbhpxjpyzijENdeaddezEyzefhixyfobhij4bwx34otpxwxpx" + "ENvwvDNOOPDJvDjpijvwENpxIJxyyzzEyzxypx_" + "012345679defghijknpqrstuvwxyCEFGHKLM", + "ijJKwxxyjkyzadklrCzEwxCF4btuvwef01fglmDJ78ENvDstfokljpMNzAvwABtumn6756px" + "IJclhiefdeNOwxrs0apxFG8crCefxy23bhDJuvad45GHjpHIpxyzghzEvwwxjkjpijhistpx" + "wxvD4bDJuvtu12xyfo34otyzIJ01jppxxyjkklqBvwijENjkbhklOPnqzEyz4bcl3456gh0a" + "zAlm23jpvDpxJKDJijvDrsstjphiKL34NOtukl45LMijMNuvfg56LMjpJKmnghqBtu34jkfo" + "hipx67nqbhhieffg237812ij01stjkklvwdeqBefadNOABzAdehi23xylmKLJKKLOP4b5645" + "0aAB4bpxqBzEIJEN34ad4556jp6756HI4534ijIJ23mnbhhi12jpghpx_" + "0278abcdehijkmnorsuwyzACEFIKNOP", + "wx45nqotfo784b56fgjpvwuv0a45LM67ijjkkltu34zECFvwlmuvDJcltubhMNNO568cotIJ" + "fohiJKmn78rCijefrsjkhituvDnqxyKLwxLMyzpxDJvwJKqBENuvtu67vDzEstijjkklde4b" + "KLDJnqxyJKcllmKLotwxfojkbhyzmnhi4bENst34fgNO45zEvwadpxghrs8cij56hijpzAij" + "78nqjk89pxklclmnwx78ghlmuvfgclstottujkfo4buvot67MNfozE45qBvDefENhibh4bot" + "MNLMfo89454bzEzADJbhvwxyvDhideIJotuvABKLJKadKLHIqBzAtuDJIJDJuvvDfoijhivw" + "rCjkzEwxefENotzEghCFFGDJxyfgghfohiefNOdeOPJKKLfoNOJKDJot_" + "0345789abcdefgijklmoqtuwxyABEFIJKMNO", + "67ghfgMNEN8cjk78pxLMjp56pxfohirsDJklNOxyghfgwxJKrCijcljpMNKLJKLMyzxystot" + "0ajk45zEhirsENqBijpx67yz8cIJjpkljkHIclCFfobhxyNOhiFG01GHnqDJzEEN8c784b34" + "OP23px12uv8cNOghxymnklvDENtuclotstjp23fghijkvwlmef34pxrCyz8crsmnuvotMNCF" + "xybhpxfoDJotFGghrCjplmstwxrsijefpxdenqvDyzxyad89otzEef8cfgDJfovDklyzfgJK" + "jkIJghotclstotxyqB8cjpJKklpxKLbhjk4b89jppxjkwxlmLMmnvwnqhivDABKLfolmbh45" + "otkltu4bJKMNENDJ45zE56efIJzAABdezAefzEENvDstrs45jkqBMNbhstnqqBHIGH_" + "56789afghjklorvwxyBCDFGHIJLMNOP", + "MNEN78vDnqyzKLFGABvwjk89DJNOwxmnGHclvDIJvwstLMijuvCFotklFGzE12xyDJjpefJK" + "qB6723bhjkfopx8cadOPghHI78yzzAxyhiijcljpfgAB0aENMNpxjkyzxyghjpKLLMbhlmot" + "wxvwvDklzE45DJst8ctuNOrsrCadvDqBzAEN01CFABuvIJpx12vDwxnqzEclhistMNjkyzDJ" + "4b34JKijfoqBABmnghtuGHfgotjpghklENhipxnqqBtughfoefHIdefg23ABxycljkefFGqB" + "adghIJdeHIbhzEuvvDnqJKGHDJvDfgKLjpklLMMNclFGNO8cGHMNclLMklpxCFjkKLJKkl78" + "mnlm4b34klgh23fghi45ijjpuv56foij67hibhwxotvwhiij78vD67DJ4bvDfoIJHIvw_" + "015679bcdegiknqsuxyzACDFIJLMNOP", + "rCclMNjpvwCF01tu4buvENpx8c7889vDFG45lmotGHfoDJotjp56bh4bnqABwxmn34klhist" + "bhlmzAjkrsfgnqcl67vw8crCyzJK4bCFjpxyOPwxijotKLyzfo0agh45otxyvDtuotjkef78" + "delmmnbhfgfoFGNOadcl2356hiotDJstdeefotrs4bfonquvfgghijvDDJMNpxjpJKijvwot" + "LMeftuIJKLqBHIhibhotuv4bfoefwxvw4534ABfoJKzAotGHzEENtuzE23zAijklpxvDDJvD" + "4bNOOPjkkldeadlm0anq01stjpijtubhuvadtuuvvwhiijotwxpxjpfoeffoIJotstrsijwx" + "hibh4bvwvDDJvDvwwxJKrCrspxstotfofgxyfootstyzzEENMNzEyzghxyrsbh4bbhgh4556" + "_12345689cgjlmoqsuvwxBCDFGHJKLMP", + "GHfgmnnqfoDJstbhqBottughhiuvbhjp8c0a4bJKfgrsforCottuefCFuvzAbhijyzvDKLhi" + "fovwwxotstdeuvzArspxtustefENDJuvvwghfgrCwxpxjpadjkpxxyzEuvLMyzABefMNders" + "rCxyefpxCFkljpijzEhipxghclhifo8c89ot8ctuuvvwvDwxxyDJzANOENzEJKvwLMOPzAyz" + "ENNOENzEKLyzxypxjpijhistefIJjkbh4bde45kllmadDJ0a4bvDbhijgh56effgghbh4b34" + "2312010aadmnwxfopxhi4bfgghbhfgnqghFGotstqBfofggh4b45bh4b34bh56ghfootrsCF" + "rCrsstotfofggh234bbhhiij4bCF34jkkljpijlmhiFG4bcl8cbhhipxwx78vwvDDJijjk67" + "8c4bmnIJnqklwxjkHIpxjpjkpxkl56wxcl8c", + "MNuvmnJKIJwxnqABCFHIzA34yztu23xyzE4bvwfgDJLMwx78styzvDENfoNOFG34zE12MNJK" + "vw45xy56pxjpuv2312yz01ijjkKLklrslmqB8cLMhighENxybhpxmnfgjpJKgh4bfgtu45zE" + "DJjkIJ12kl67st78vDDJyzij89NO56zEhibhclotOP0axyghuvjk4brCklforsclpxlm34HI" + "GH23vwtujpwxotstpxjprsrC67jkNOkljpeffoHICFwxotdeefvwfgghbhdeMN4bFGtufo45" + 
"KL56ghIJfgghlmJKKL67vDvwmnLMpxwxvwrCvDpxDJuvjpijvDjkijhi78DJnqMNrsghKLij" + "uvtuklotstfoJKfgotfoNOfgIJmnstghKLrsHI67bhlmmncl8cGHOP788cclkljkijhibh4b" + "4556rC3423FG67124501341223340anq45561267", + "foqBnqvDjpDJxyhiIJfgMN1201ghotHIwx0atuefjkfovwvDdeyzotklzEadxyfgijDJyzwx" + "67vw34uvpx5645hifozAENNOwxzEjkJKtuotvDghpxbhcl0aij4bst34rs23mnxyOPstzAAB" + "efENzAzEforCotstvwDJvDIJDJLMrsefuv34IJvDstFGbh4b342312010aaddeeffg23MNtu" + "uvrCHI34GHghvwwxxyyzHIzA67xyENfoABqBfgotIJghnqABzEzAABqBnqDJNOmnstlmfozE" + "ENcl8c89kllmmnnqABzEkljkjppxxyyzzAABqBnqmnCFJKKLLMhirCzEclkljkMNJKcljpvw" + "bh8cijclrsklvDjkvwDJxywxpxvDjpxypxxyyzuvstotfofg786756454bbhghfgfoottuuv" + "vDDJJKKLLMMNENzEyzxypxjpjkklcl34st89NO23", + "yznqmndeijjpDJxyqBhiABwx01vw23bhjkuvadijderChiIJ0atuklNOvDuvpxzAtuyzHIot" + "clqBnq8cxyKLst89foJKqBENwxLMjk56tuGHefFG4brsjpvwzEyzkldefgghDJlmIJ45rCad" + "efxyvDMNdeENLMefzEcl348cjkuvklENpx23lmhiMNijKLLMjpwxadENMNtuDJhiotmnHI0a" + "adxyzAvwfoEN67ABCFqBstIJeflm12HIzAdezEefGHzAcllmFGmnadfgnqHIDJqBotAB01gh" + "fovDzA56yzbhxywxvwvDstrsxyDJIJHIMN0azEfgGHjk8c78ad4bFGyzstghijfgxybhpxxy" + "01hi4567zEENijfojkNOENotfo8czEJKKLfgCFLMrCKLyzgh34xy23JK4b34zEENNOOPNOEN" + "zE_124678abcdfghiklnopqstuvwxyzABCDEFGHIJKLO", + "GHzAbhmn4b010afojpxyABFGfgMNtuvDpxotyz34IJDJghfokl78fglmbhCFefghdeLMstvw" + "zA12HIvD4b8cxyNOjpjk4523pxwx89MNOPrs56rCvwnqklIJyzjpuvvDbhtuclotpxwx67DJ" + "zEuvKLstJKrsfozAhiijENNOjpbh4befzEGHfgpxjkHIhi8cENxy45bh34jpKLvwuvCFhiwx" + "4bghjkvwqBijkltuuvdevDstrsDJABotyzzA56xypxtufozEcl23jpLMIJyz34KLvwENvDij" + "jkhi78NOijjkDJ4512uvbhqBadtuJKottudevDDJ4b8chi012312IJDJvDKLklijuvtuotjk" + "foijnqlmhiHIgh67mnbhklfgghefnqfoottuuvvDdeDJIJDJvDbhuvtuotfofgfootstrsrC" + "rsstotfo_013456789bdefgjlmnstuyzABDEFGHIKLNOP", + "uvtufojpuvrCwxefyzzEvwENde8cot4b01fghiuvzExyDJclwxyzENOP12vwGHxypx2367st" + "fo4578ot01zAAByzsttubhadhidejkgh89fg4bqBkllm5645MNHIforsuvJKij674befzAvD" + "CFrCxyfgjp8cpxABhiijhiadjk34otbhghfgqBFGkl78tuxyfoGHDJefuv45ot0aclIJfo67" + "yzdelm56jphiaddeHImnvD45nqclKLef4bijzADJhiqBfoot34jkJKtuuvotfoCFEN23vDef" + "bhdeDJijxy120apxwxadhiJKdeghIJefKLfofgklJKotKLlmxyvwwxijLMstfoKLotvDyzpx" + "fojpzEstpxfgrsrCtuxyghwxstENDJbh4bMNIJbhvDNOklHIGHvwuvyzxypxjpijhiijbhjp" + "4b34px4bxybh_024789abcegijkmoqrstvwxyzABCFGJKLMNO", + "78xyGHhiijkl8cjkghhiMNclij67ENyzjppxxykljkjp78pxvwuvwxNOzEtuxyOPyzxyot67" + "fopxefzAclENfgghABhiijjppxyz8cxywxvwuvwxyztu89xyzEyzstotfoxypxzAjpclfggh" + "deefhiijlmbhhiijjppxmnwxvwwxxyyzkljkzAqB4b8cijABzAzEDJJKKLLMMNENzEyzxywx" + "vwqBbhfguvIJghhiJKbhijtuvDrsuvjprCad4bpxxyMN4534tuyz0anqotdezE23KLfosttu" + "LM12rsuvMNNOJKvwMNDJvDDJJKKLLMMNENzEyzxywxNOuv4btupxCFstKLrCfgefbhghotjp" + "foJKIJefjkadkldecloteffolmtuuvvDHIGHDJvDuvtustrsrCCFFGGHHIJKotstotfoef23" + "12010aaddeeffgghbh4b458c34566778clKL8c7867cl564534", + "DJ45vDJK67AB34rCpxFGDJ56CFzAjpKLxy78yz8cuv45IJLMbhqBwx67ABvwJK34zAjktulm" + "HIzEvDuvottufo7823otfoklefKL4bmnnqvDqB56ABuvMNpxLM67xy12hiMNGHij78deadfo" + "wxvwjpDJpxwxvwvDNOENvwjk34kl4523yz34clDJ4bxyjppxIJwxvwuvlmDJvDtuzEmnvwyz" + "jppxMNkljkuvtuotfofgghhiijjppxwxvDDJJKklKLst238cDJlm12clfo56tuvDpxbh45uv" + "vwhitu89lmijotrCfofgjppxfootxywxvwrsuvstotfotu67rCefmn8cdenqclklfgotghef" + "dead0a011223344bbhghrsfo45jkCFjpottuuvvDDJJKKLmnLMHIMNNOENOPNODJENvDzEuv" + "zAABpxjpjkkllmmnnqqBABzAyztusttuuvvDDJIJHIGHFGCFrCcl8c7867", + "ENGHvDvwijDJuvJKefKLhijp01tupx12wxstjkrsdevwghzArCLM56MN45vDfgxy34jpijwx" + "vwyzadxyhifoABuvzADJwxIJtu23stpxvDjpbhijCFjkklyzrszEvwlmjkxywxABzAqB4bDJ" + "efABrCuvmnzAtughyzvDzEHIstotDJvwbhfoxyfguvpxhinqxyJKrsKLstbhyzLMvDtursDJ" + 
"JKKLLMMNNOENzEyzxyotpxfojpjkklOPefjkijvDwxFGdeCFuv786756454bbhhiijjkklcl" + "ghadtulmcl67px8cDJqBjpNOij78bhlmOPnq6756mncl450a34nqEN4badzEbhqBklrChide" + "ijrsjppxeffostotyzstfoxyrsefdewxrCjkjpCFvwuvpxwxvwpxjpjkyzvD01zEadkl0acl" + "FG01uv8cDJ898cclklIJDJvDjkjpvwpxwxpxvwvDjpjkklclDJJKIJ8c89", + "xylm0avwJKad34uvtuhi238c78depxyz01wxqBjp670aclpxvw8cuvnqwxmnlmclfg89sttu" + "kl8cghadfg01jkjpstpx78vwvDrsDJwxKLIJxyyzJKotefijzEENyzvwwxvDhistjpbhDJpx" + "MNvwwxIJNOvwjppxxyyzzAABqBnqmnlmklrCij4bzEvD45rscl12ENyz0ajpklhiuvclrCtu" + "jk8cCFbhotjpGHclOPMNghstkl01forC4b0apxxyLMdeijefyzfgotzEfo45fgstrsghotKL" + "uvjk56jpvwbhJKzAyzpx34rCklwxHIijxywxghyzNOIJ4bHIvwfguvENzEyz45tustxyotjp" + "MNfo34fgghFGhibhhiCFFGij23jppxxyyzzEDJNOrsENrCvDvwwxpxjphibh345645566745" + "4bbhhiijjppxwxvwvDDJJKKLLMMNENzEzAABqBnqmnlmcl8c786789rs3423", + "wx45jkCFFGfo56678cfgvwxydezAghklhiwxvwuvyzvDDJtuuvtulmzAotjpfofgghbhABij" + "zEhijk4bxyJKadklbhstyz4bvw34pxxymnwxKLzAvwlmzEABijotqBuvtufo23uvjp0ayzhi" + "efABvDDJ4bEN12otjkghzEfoIJaddeefyzadxyrszAvwwxvwstdeuvpxENtursfgghbh4b34" + "2312010aaddeuvzENOvwxyyzxywxvwvDDJJKKLLMMNENzEghrCvwKLOPstCFjpNO4brsjkpx" + "ENsttujpkl89zEjkjppxwxvwvDuvDJtuIJJKyzotOPfoHIpxjpfgcl8cmnlmkljkjppxxyyz" + "zAABqBpxclIJghbhkljp4bvDtuijuvHItu45otGHhi4bbhfo4bvD45ghfgotDJtuvDvwvwuv" + "tuotfofgghhiijjppxvDjkHIpxDJjp56344556IJ34JK67HIKL78jkpxkl8ccl", + "xystDJfgghIJJKKLfoLMclvw8cklpxMNrCcl8cvDDJ12fg4blmwx34HIvwbhmnuv4bbhhiyz" + "ijghzE78tuhiENjprspx2334wxJK01stCFij12NOGHjkjp8c67bhpxxykl4bFGnqrCbhIJyz" + "hiijlm23HIjppxxy56jkwxvwENklclzEvDuvtu8cghotENmnGHfo89DJ78hiefnqdewxrsyz" + "zA6756vDDJ45bhFG4bbhfgghJKhi8cijfgjpKLhi34IJghCFqBjppxwxvwuvtuotfofgghhi" + "eflmnqhi4bclfostuvDJvD8c23xywxmnbhvw89lmhiuvrsjkjpjkpx4btunqotDJfoLMijqB" + "klmnIJjpwxefdenqjkpxxyadijuvvwAByzzAMNde0azEENuvwxOPefNOENzExyhifgghfgwx" + "vwOPeftudeadyzhiuvijxypxjkqBnqmnlmkljkjppxxyyzzAJKvDtuDJvDJKijvw", + "IJCFqBnqijfg67zADJefpxwxrszEhi78AB23zAJKtubhqB56vwjpijwxrCjkpx67footHIkl" + "jpdeKLuvLMclMN4btuvDghhiLMwxuvDJNO34JKzEENjkvDfgzEklABxyKLvwclLMijlmbh8c" + "OPGHgh4byzIJpxMNclDJhi89fg8cjkxyjpvDpx0aHIyzfoadghijjkfgxyGHbhNOwx78ENvw" + "4bkllm8cuvLMjpOPtumnclefyzKLlmotcl45JKhimnHIijnqfoIJ4b56zEbhghhi4b34238c" + "ENde67bh34qB45JK4bstABKLrsbhfgLMefghhiijotyzfo56jppxxy45jkjptuuvjkfgpxxy" + "34jpijad23wxvwuvrChibh1223344bbhhituotfostotfostCFrCrsij34st0123efjp1223" + "px34deeffgjpjkklclkljkjp_0245678cdefhijklmopqrsuvxyzABDEFGHIKLMNOP", + "uvvwtuhiuvotjpwxtuijvw8cpxwxfojppxuvKLefdeMNst7867bhtujkadLMuvklclstrsjp" + "rCvwwx56jkot8ckllmstxypxwxvDvwwxABrsDJot4b89IJvDDJjpfofgghhipxxyijIJjkJK" + "0a34HIjpbhyzrCstuvtuhi23ot12fouvpxvwENijfgghxypxjpstbhhicl8c786756454bbh" + "hiijjkefMNlmCFmndenqzAfgwxefNOGHLMrsclpxyzxyrCzE34zAlmyzENzEmnyzxyklENrs" + "otqBwxdejkABhivwNOtuuvtustotfofgvDFGzAghzEvwbhDJJKKLadwxIJ4b23EN3456cl8c" + "786756454bbhhiijjkOPCFpxJKjpNOnqjklmclKLkljkjplmpxwxLMvwvD23uvDJtuMNIJot" + "LMfoHIKLmnlmIJdeef788ccllmJKfoKLLMmnIJMNHIENot78GHdezEtuFGCFnqzAuv", + "wxfgKL34ghxytuotfofgOP23uvvwhitu0ajkotkluvlmLMFGwxfotuvwrCHIxypxdeuvwxvD" + "45adfgjpstyzMNNO4b12CFmnDJnqef67xy8c34otJKOPjkklGHvw23wxgh56IJ34lmzEfg67" + "ghrsfosttuyzfgxyuvmn12rs78pxKLjp0112stLMvwjkwxklvDvwrCclwx23px8cJKjpjkij" + "hiwxKL89klHIrsstbhotjkLMuvvwwxpxjpijhighfgfootclMNLMvDDJENJKNO4bbhvD0awx" + "uvefIJHItuotfodeeffg8cottuuvvwwxpxjpijhighfgijjpzElmyzclKLzAxypxABjpijhi" + "vDdebhklad0aijJKKLLMmnad4byzzEEN45qBMNzElm4bklxybhnqqBghENdefgzEjkijENzA" + "efAByzfgghbh4bqB34ABMN23zA_01235789abdfghiklmnopqstuvxyzCDEFGIJKLMNP", + 
"8cfoJKwxtu34stmnpxGHfgghMN89yzotfojpijCF78vDefjkKLENrswxpxxy6723LM12vwHI" + "fgklrCbh4bwxqBclhideABqBbhCFvw4b34NOzAjpDJpxjkFGvDvwCF23IJwxijefjppxzEgh" + "8c12lmJKABzAadHInq4byzxyklbhghwxvwuv01tuMN1245stmnlmtu4bclDJIJuvvDqBhi45" + "34HI23rsfgvwghwxhi7812ijjpbhIJ8cjkrCpxwxvwuvklJKCFtu7801otfofgijdeGHghnq" + "tuqBKLuvLMvDJKDJJK56vDuvhitubh4bstlmotbhABhibhzAclijstrsjkMNfoENzENOrCjp" + "px4b34OPxyNOfgstCFef45otfoyzrCENxyef4bKLzEbhst56ghrsdead0aaddezAef67fg78" + "ghAB6789JKIJpxbhHI4bIJJKjpKLjk_" + "0123456789befghjklnpqrstwyzACDEFGHIJKLMNO", + "jpfguvcl67DJadMNpxJKxyvDDJ8cyzij89jkzEkl0acljpdeENhiijbhCFpx8cadwxjkIJ4b" + "KLklvw56HIfonqLM01MNmnghNOlmij4534fg0abhFGLMCFxy4bwxhiijvwMNjkklpxjpijLM" + "ghtuhiijjkOPottuuvijvDefyzGHDJklpxfolm12vwnqbhhiIJefHI01de23xyijqBjpnqtu" + "zEadNOwxvwfgotpxstjprC12mntuuvlmvDDJclrs8c78vw0167JK56stfootKLLMIJ0aMN45" + "4bstENNO56JKrCzECFzAyzefxyklde8cjkHIjp01wxGHpxjpfoLMijfgghbhKLLMvwuvhiot" + "ijjkklzEENtuMNcluvrsvDotDJvDzEuv8c89tu788cotlmeffoefmnnqqBABqBotnqmntulm" + "cluv8cvD78DJ6756454bxyyzxy_" + "013456789abcdefghijklmnopqtvwxyzACDEFGHIJKLMOP", + "hirCvDCFjk56rs67ghGHtuijMNrC7845st4b34tuDJFGJKbhuvvwtuclyzuvzEfgCFENrsef" + "4bNOvDDJjkwxghhiqB5667IJnqHIbhkl8c78fojkpx45admnGH56lm4bmnijlmfg23rCotvw" + "ghLMdeqBxyvDadnqMNjp34footCFDJABefpxfojkstKLuvtubh67cllmuvIJLMmnlmkldejk" + "jpqBijpxotwx4bfovwfgvDghbh4bzArCxyhiijrsDJwx45clfojptuxy3401yzvwuv23zA12" + "JK23vD34yz8cxyKLDJ4bclJK12tupxot8ctuwxfofgvwpxvDuvABjpKLrCDJCFjkLMefvDuv" + "kltuqBIJ89clDJ7867ghFG45788cwxclbhklotABCFHIfojk56IJ4534pxot23jpjk1223px" + "klJKrCcl348c898cKLDJclkljkLM45MNNOLMOPNO_" + "023456789acefghijknruwxyzBCDFGHIJKNO", + "DJ78vDjkvwgh67klwx34jpijfgstMNLMxypxeflmmnuvhiENFGadotjpijtuuvyzghrsbh0a" + "rCfo23IJnqxyCF4bpxhiDJcljpijvDvwotwx01stbhfgGHyzde0axy34efzEpxghzArs12jp" + "JKKLjkNOfgfoMNklyzuvotJKclLMlmxyABtu8cuvDJ4bqBvDuvbhsttuuvvw23wxpxDJ45jp" + "78adzAyzrsABxyjkrCvwklvD34ENclrsDJHI8cIJotDJvDvwwxyzHIzEENyzCFgh45xypxfo" + "jphiNOOPijvwefdejkklcluveffootfgtuuvvwfoGHwxKLpx4bstjpvwjk5667otklvD5645" + "JK56DJvDstrs78uvstotturC8978strslmfosttumnuvvw4b67fgnqlm56bhqBwxpxjpwxjk" + "vwvDJKklDJlmIJHIIJDJmnABhivDjkvwijjkwxpx_" + "0235678abcdghijklmnopqrsuvwxyzABCDFGILMNO", + "efvDijpxjkDJklfgcldestrslmxyrCIJfoyz4556HIghottuuvhiadfgCF8crsfo89ijbhot" + "tujkhi67ijghjk0afgclkl8cjkefdejpstzApxxyhirsABijjpyzjkqBzExyyzzAABqBnqmn" + "lmkljkjpbhwxvwjpuvghvDvwclDJ4b34bh4bghhiJKklfgghKLhiijjpbhhipxtuqBxyjkjp" + "pxijyzxybhhi4bbhzE45344b5623wxzA12vw34vDzEuvrCvw4bwxbhhi23453445ijstjppx" + "yzxywxvwvDDJJKKLLMMNENzEKLIJrs56ijHI23JKIJ8cGHHIstFGhirCtuadIJJKKL67mnCF" + "rCLMMNNOde12ENzEOPzA7801bhKLvDABfo4byzzEvwotwx1223344bbhhiijjppxwxvwuvtu" + "otfoefdead0aMNJKvwjkwxjpkljk8cclLMMNENzEzAABqBnqmnlmkljkjppxwxvwvDDJJKzE" + "NO8cENzEyzNO", + "kl67ghfgbh4botefMNzEpxjpzAyzxyAB0a78foqBIJjkstJKzAclwxvwzEuv56tuefkl12st" + "LM67pxhinqdebhrsNOABENjpjkrCMNzE34EN45wxpxlmefyzad0a01zE4b56HIKLLMqBMN34" + "ijotIJvwvD67wx23xyvw8cvDJKNOklyzDJvDENuvtughjkstKLtubh344bbhhi0apxwxvwuv" + "tuotfofgghhiijHIkl78ot8cMNhixyrslmstGH34ghclotfoef67zArs56otdeyzABadfgzA" + "strCIJCFFGrCEN8crsmnstotyzxyfofgghLMhiMNwxOPNOij89lmOPvwvDefMNfootqBjptu" + "pxDJLMvDuv8ctuKLnqotfoclwxIJHIefIJJKqBde8ceffoDJvwotkltuuvtu89vDjkijadot" + "foDJhighfgghefhiJKKLijjkklxy0ayzzEyzxy_" + "012345679abcdefghijklmnopqrstuvwyABCDEHIKLMNOP", + "pxjk45wxtubhstLMkl4b6701clrsjpefABdeadMN34fgpxJKhiuvmnvw0awxxybhottufouv" + "yzijvwDJ4bjkKLhiCFot8cJKkl34wxjpxyKLijlmyzhi89LM23pxvDadqBzEmnstjpKLxyyz" + "bhDJzEJKENNO12OPijnqxyhiijIJghfgtuzEvDefuvdeHItuzAstjkhiGHklvwwxyzvD56AB" + 
"ENxyyzlmNOpxjpclxyijhizAIJmn34yzJK4534lmMN56KL8c67xyzELMgh23MNfgyzKLpxwx" + "4bJKDJjp34vDuvtu4556IJeffootstrCrsst45mnNOjk4bqBnqbhgh67fgqBfoottuuvABbh" + "FGvwwxijxy4b45wx7867vDef5645DJIJdemnyzHIzAIJadDJ4bvDvwdeCF2389zEwxbh12uv" + "pxtueflmhi23cl8ccllmfoghfgottughuvbhij4b344bbh_" + "012345789bcdefghklnprstvwxyzABDEFHIKLMNOP", + "34vDyzxyjk4b45jpzAotfgyz238cbhgh12ABklclDJpxzEfo4bhistENwx01ijjkjpIJvw67" + "HIpxFG34NOefxyvDwxvw56uvDJyz45zEtuvD785689klwxvwuvtuotfofgghhiijjpIJijst" + "deefrs6756JKot78fgghbhsthiijlmjp4b8cpx89ENwxjkjpKLpxrsDJLMGHkltuvwHIjk23" + "bhjp45vDpxcl0awxOP34rCvw4bxy12yzwxlmxyJKzE23DJMNvw12mn34uvzAJKENNOtursst" + "OPklABtuIJvDJKuvtuotzElmzAqBAB01fofgzAotyzwxCFghvwbhrsuvsthiij0arCrsstot" + "nqtustjprC23CFrCrssttuuvvwwxxyyzzEENMNLMKLJKIJHIGHpx12xymnclzEqBzAAB8cyz" + "xypxjpijhibh4b455667788ccllmmnnqqBABcl8c564523bhhiijhibh4b455667788cclkl" + "jkfozEfgbhghfgbhfo", + "bh78ad674bpxmnuvvw0ajp34ENijwxCFxy12MN01yzefsthizEvDbh78JK4bdevw45rChiwx" + "fopx56rsjpijotEN23xyNODJstMNhiadtughuvKLvwbhwx0ahiOPxyyzpxJKvDvwwxpxzAjp" + "ijxyDJ4b45IJpxhixyjppxyzxyrCbhCFhivDDJIJHIGHFGCFrCrssttuKLtujpijhighfgfo" + "ottuuvvwwxvDIJghABjphipxtu56ghfgfoottuuvvwwxpxjpij67jkklDJtuclyzef8cghjp" + "depxlmbh89xyzEyzxypxjpjkklcl8clmclNOij8c786756454bbhhiijjkkl3445zEghLMfg" + "KLadefmndefoijotghtuuvvDDJIJHInqJKhighad56fgIJij67JKKLefvwtufoLMjk788cot" + "qBABmnkllmtuzAklclmnlmmnENzEENyzwxpxNOjkijhibhlmcl8c786756454bbhhiijjppx" + "xyyzzAABqBnqwx34jk23vw", + "pxtu45kljk56lmstIJzEij67vwwxhigh56ENvDklfglmjpefuvvwtustbhde8cmnad4bijuv" + "px0a45xyjpHIhidewxbhvwotclghyz89rsjknqst4btuij78rsuvvwzEqBxystyzwxrCvwkl" + "rsvDDJstvDotjk5645cl67pxtujpfoot56tuhi8cxyuvfgbhlmvDDJJK45DJtuvDgh4bpxrC" + "klfgjkfoCFbhot34ef89foottuij23uv4bghtuoteffgfolmdeadclotrCmnstnqrsstqBtu" + "eflmdeefuvklvwyzxywxvwvDDJJKKLLMMNENmnrCjklmIJJKzAklfoottuKLuvDJxyvDcluv" + "DJtuyzotIJfoxyefLM0a4b342312010aaddeeffgghzAhiMNENNOij4bHIOP23ENzEhibhyz" + "hixyjp4bpxxy3423jpijyzzEhiEN4bbhNOhi4bijjp_" + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP", + "GHclklhirCHIENghIJHInq01JKvwKLijqBhijk8c78LMad12vDwxfoottuMNuvNOpxOPvDbh" + "xyfgyzefijzEjpdeclEN8c0aGH67CFjkklghNOfgfovwHI564534ot01tuuv4blmvDjkDJ12" + "JKbh34ghmnvDklvwfgwxpxFG23footjpjkklclvwlmvDst34efad4556hitudeLMrsvwKLmn" + "adst67ijuvrCOPwx4bDJpx0aadwxklvD89vwIJHIjk78fgghvDijfgwxfopxxy67GHDJyzzE" + "efdejpvDhibhklENjkad0acl8956HIuv8ccltukl45ot4bjkfoijjkuvstbhrscl8cnq78vw" + "rC4b8cclkljkghjpfgpxxyyzwxmnlm45zAotstzExyABmnotfoqBrsvwcluvzAyz8cABnqpx" + "ghzA786778qBjpijnqjppxqBmnxy8cABwxclvwuvlmtusttumnzAuvvwwx_" + "12789abcdeghijlmnoqstuvwxyzABCDEFGIJKLNOP", + "jpJKfgwxghzAklmnjkpxqByzKLCFvDvwwxcluvvwABjpzAfotupxklij0ahiwxLMFGDJadbh" + "GHjpHI4bnqqBstrslmvwvDxyzEij45wxuvJKhiABbhENturCot4bfoCFDJmnyzef0ast34NO" + "23zAvDotvwwxpxrsFGrCjpCF67jkzEfgklnqijhicl8cIJdewxefghjpfovwotlmvDKLJKDJ" + "vDFGtuuvvwwxENxypxjpyzGHzEijjpxytuKLpxdeadsthiMNbhFGrsjphifgijhijpyzqBEN" + "NOxybhzAzEzA4bnqqBABzAyzxypxjpjkkllmjk34rCzE23OPotENghclbhijhiijLM4bbhhi" + "12jkijkljkjpxyyzMN0123kllmNOmnENclzE34yz45xypxjpjk8cwx89kl78cl678c5645cl" + "kljkjpvwvD4bDJIJfgfootstrsrCCFFGGHHIIJDJvDvwwxpxjpijhibhghotfgfoOPef3423" + "12010aaddeeffgghbhotrCstrsrC", + "cl78uv45wxfofg67AB12efLMCFyzpxghvwhivDotwxklzArsij788cclOPDJmnfo4bdelmGH" + "IJ23tujkstklbhFG34rCjpqBuvJKotCFghABvDnqIJpxHIefGHmnlmDJ01xyadKLtuyz0afg" + "uvzEcljkfohiqBNOotdersstnq8crsijadMNENJKLMgh78efKL0azAhiIJ67fgghfoMNrC4b" + "JKotHImnGHijtuLMfodehieffoottuDJFGjpbhuvadvD0aABuvjkpxijqBKLjpijIJhitu01" + 
"klxyCFclJKyz0azEforsghdeadfgef8cfo0aijot01zAnqLMtuzEuvvwmn89IJMN8cwxABEN" + "NOzExywxvwstfoefcluvdeeffolmclotfotuMNzA12fg4buvghvwwxpxstjpLMxymn01bhpx" + "nqij4bbhrsgh0aadfg348c7867rC78zE8cDJENclNOOPJKKLJKNOfoENzEklDJ_" + "1245678adefijklmnopqrstuvwxyzBCDEFGHIJKLP", + "uvfgad0aghbhtunq8cfoNOyzotst23fgfoghDJhi89zE4buvrsijfgIJefghjkxyyz12pxkl" + "34bhwxfgvwtuMNuvclhiKLpxHItu4bENvwijzAjppxABjkLMxy23wx458cghklclotijyzhi" + "ij8c78tuvwuvvDzE34tust4bzADJABMNotxy23rCfoefjk01yz56fgfovwderswxad8cotst" + "jpkltupxrsvwjpvDuvtu12DJqBotlmfoefforC23ottucl01uvjkIJOP0a8cENjpxyNOJKwx" + "vDDJvDuvvwzAKLtuHIJKvDotfoABIJLMdeGHFGeffo01HIIJDJvDvwIJMNwxotOPtuGHHIuv" + "vDpxIJFGJKadjpjkKLklDJJKLMKLJKDJvDLMuvtuotclfoef0aaddeeffgghbh4b34231278" + "6745564bhiijjppxyzxypxjpijhibh4b455667788ccllmmnnqqBABzECFlmzAklABjkqBhi" + "ijjppxxyhinqkllmmnnqqBABzAyzxypxjp", + "KLJKlmijOPfgrsxyvDqBpxLMuvDJKLwxmnjphixypxyzNOIJvDxyrCstvwwxCFHIzErs45DJ" + "pxjpyzJK674befENzE45ENotijbhxyjkklhitude56vDLMDJpxIJ0aHIjpuvGHMNclij4bbh" + "78ENrCad67ghFGpxLM4534HIjp23vDfoxyyzzE4b01bh12DJzAtupxxyotpxjp56hiuvefij" + "jp45depxtughuv34ABvDvwuvJKENyzfoot4bst562312foefotfgdeefaddeefIJfoot01gh" + "tuDJfguvvwwxrsvDpxvwjpxyjkhistwxvwzAvDtuklrsDJcluvNOvw8cIJjp89OPlmpx0ajp" + "01ghwxjkij78pxNOvwHIGHuvFGjpENpx0a67klxyjkhikltuyzcl898cghclzEfgzAstrsrC" + "rsxylm5645wxvwuvstmn4btu45uvnq5667vwghmnwxxyqBlmkljkijhiijjkkllmmnnq_" + "0356789abcdefhijklmnopqrsuvwxyzABCDEFGHIJKLMNP", + "vwaduv4byzJKMN34KLqBtupxjpzE23ENxyDJfo78IJyzNOijOPfgJKLMhi45bhjk4bwxot12" + "vw34zEklij23jkpxlmKLclMNjpuvtuzAst8cxyijhiijnqrswxLMyzrCxyvwmnwxuvyzABxy" + "MNENzEzAMNvDDJIJJKtuHIIJvwqBuvtukldewxvwvDDJJKKLLMMNENzEyzpxotuvefzAfoot" + "stjpjkHIrsijhighCFclbhotNOkljkxyhiyzENMNzEyzxypxefjpjkijfgfoklotfgLMwxpx" + "JKstjpjk4b455667788cclkljkijhighotrCfo898cbhABgh34GHwxfg45footrsstzAtuHI" + "gh4bcl56rsuvvw67vDpxlmyz45xywxvwyzpxmnbhkl4bnqwxghrCmnCFjpjkklijbhghfgot" + "foefde0a56addeeffglmghhimnijjpbh4b0a78pxxyMNENzEyzxywxvwvDDJJKKL3423clvD" + "uvotIJstDJIJHIGHFGCFrCrssttuuv128c0189clzAAB", + "MN4bCFuvjktuxyotyzwxbhklvwpxxyGHyzuvzEfotuij010anqefyzmnvDuvDJjkwxxyvw34" + "otwxyzsttuENFGzErsdefo12yzadxy45jpef01pxwxuvvwwxNOstrCfotuuvotvwijjptu0a" + "xyghOPyzde4bzAforsLMMNENyzwxAB23xyyzzAABqBnqmnlmkljkjpjpjkefmnyzclCF8c12" + "adfgklfojk01jpbhpx34xyclwxvwuvtust0ayzotvD89deABDJzE4bJKDJyzfouvrCrsstot" + "KLfg23foghxypxLMbhfgfootjphituijuv8c8c786756454bbhhiijjppxxyyzzAABqBnqmn" + "lm34klvD8czEjkjpvwENrCDJIJHIDJpxxyyzzEwxvwvDDJJKKLLMMNENzEyzpxjpijbhbhhi" + "ijjkklcl8c78675645IJHI8c56NOGHOPHIkljkjppxDJvwxyyzzEMNLMwxvwvDDJJKKLLMMN" + "ENzEyzFGpxjpIJijhiJKbhklcl8c786756454bbhhiijIJHI", + "45wxpxjp01jkefkl0aDJxylmIJLMyzdepxNOijhijpadijGHENvw56hiuv8c89stbhNOFGwx" + "MNotst8cHIpx78vDOP4bzAjpvwGHclhivDijxyhi45JKIJbhghpxCF4byzwxjkfgKL12jppx" + "wxfobhghvwijhiijwxLMrs0167jpxy5645pxjkxyyzfg8cklzEot78EN4bstcl8cbh67NOJK" + "HIKLyzrC344bbhxylmmn4bjpot34yzrsclDJJKhivDijhijp56pxjkzE23EN45NOstjpOPjk" + "rC34fo23otlmzAfoefghfozE12nqtu0aABLM2334MNdeklyzadpx45uvde0ajkhi56ijwxDJ" + "jk01vD12mnqBvwklIJefDJwx01xylmvDABhiyz67wxJKzEvwuvENklqBwxtujkotfostotMN" + "ijLMstzApxrsrCCFrCrsstnqotzEKLfofgefghhiJKijdeadzA0aadDJjkklABdeclqBnq8c" + "qBABfgef_012345679abcdefghijklmnopqstvwxyzADEFGHIJKLMOP", + "67ghjkjpOPhifofgDJFGvwijpxghkljplmhiijwxmnvwvDxyuvtuwxvwwxstrsnqqBABzAyz" + "xypxjpjkkllmGHmnotfofgghhiijstjkjpklclIJbhHIotfoxypxjpjkkllmmnnqqBABzAmn" + "12zEefrsfgEN4b45NOghDJhiOP3401IJijrCrsotJKbhhiijCFFGvD8cdeGHef89yzjpstxy" + "pxrsvw4bzE23jprCKLbhLMtu4bDJxywxpxjkIJhiadklHIfgclijrsjp1234450ahi56gh01" + 
"jkdepxklyzxypxjp8cad78clMN23hiijhikl8cENjkkl78jppxwxbh45vwclvDuvkl674btu" + "otstfootbhtu34fgjkvwjpwxghyz45zEENFGGHHIIJJKKLLMMNENzEyzxywxvwuvtustrsrC" + "pxrCwx78DJvDfgrsJK56yzDJfovDvwvD8cstKLJKDJotvDKLuvsttuforsoteffoefdeotzA" + "tuuvrCCFvwwxlmmnnqqBABzAyzxypxjpjkclad8c780amn67lmmnnq56", + "tu12ij23otwxlmxyjpvwvDLMijJKhiMNgh78zApx01ABcljkfoijhijpyzzEmnbhwxijfg4b" + "qBIJHIadefENnqpxjppxjkKLkl458cxywxvw34uv56zAvwNOhi0aDJ23tustwxbh4brsjkOP" + "yzcl89rCpxjpjk12px45de23ijABkljkwxfgvwad8cjpvDpxDJIJDJENzEcluvotyzklvDxy" + "stDJvwwxpxzAjpijrshibhENqBjkvwzEijjktuhighfgijfoNOENIJfgnqotlmjpJKzEDJpx" + "yzrCCFxyuvvwpxjpwxijzEMNENzEyzxywxvwvDDJJKKLsthiklMNFGtuKLJKDJzEijjkrCij" + "rshighuvfgbhfovDotstkl4botbheffoottuuvlmvDotDJIJde45efdefo34ottughotfggh" + "bh4b342312010aaddefoefde0a1223bhhiij3456jp45675645pxzEyzxypxjpijhibh4b34" + "2312010aaddeeffootstrsrCCFFGGHHIIJJKKLLMMNNOOP677889zAAB", + "uvhibhwxvwjp4bxyghfgijpxefyzhidebhjpxyghpxfgadzAfo4botwxjphizEeftuuvfoij" + "otjkENklLMstlmNOhirsyzvDxywxijbhvwzErCAB344bbhghhiyzfgghijfgfoEN6756jpwx" + "mnCFotpxefqBstfojpjkdenqxyefyzrsfgdeghijhighfg23klefijadrCde450aNOjkad4b" + "bhdeefclfoghottulmfguvfofgot01zAghvDOPbhst4b0a45563445klABfopxDJjpefIJij" + "vDhipxbhzEaddewxefvwvD786756454bbhhiijjkklcl34ad23MNfo0aclDJENjpzEvDwxpx" + "wxzAvwotvDstABkljpjkDJkljptursclIJHIqBwxnqpxJKmnGHwxjpzEIJMN12HIENJKKLlm" + "IJ8cclkl8cjk89jppxvwstLMtursMNuvJKvwENwxpxjpjk8cIJklHIclklzEGH8cjkjpvwFG" + "pxvDDJKLLMMNENzEzAKLJKDJvDvwwxpxjpjkkllmmnnqqBABzAzEENMN", + "stxyyzzEvwuvrswxvwklhirCijLMIJfgtuclef8cDJjp67ghCFjkjpJKstEN89uvbhpxfgvD" + "rsuvkl23ghfojkhixyKL4bklclijFGDJ8cJKKL12fgyz45MNghlmzAwxfg34HIjkxybhklyz" + "56NOzErC45jkmnde2334efjphiGHENpx2301ghfolmjkklNOfgottufojkfguvvD5678uvot" + "67ijhi56DJjpsttuCFnquvvwrs45fo4bclwxxyklijIJefuvyz56zEpxdeeffojkjpotghtu" + "uvklvwwx12bhxyfoyzrCcl2312hipx8cefad0aOPvDvw78adst67zADJ78deefijfoclklfg" + "wxvD56fojk01fgrsklvwtu45wxIJCF4bclHIGHHIghIJDJ45vDuvABtujpxyadvw56histij" + "bh8c78wx0a67pxzAothi5645fovwyzqB4badxyzAwxAB34uvstfgbh4bbhhighbhfgtufouv" + "vwijjkwxkllmpxmnotlmkljk_01246789acdefijklmnoprstuvyzABCEFGHIKLMNO", + "deghhiIJCFfgefFGijHIKL78deghpx89zEfgwxvwLMklMNbhghhilmNOfootadOPuvjpENzA" + "st4bABpxtu67NOxyyzJK45effgghwxbhjkijjpKLstkl8cjkvwrsmnuvclpxGHIJxy4b45vD" + "yz34hifo56klrC8cdeqB78CFstclzA458cotfoijstwxjkABHIfg56lmzEghrsbhEN4befhi" + "nqjpJKfostpx67yz34otDJvwfoxyyzzEzAklfgrCghENjk89jpLMwxklKLstvDABuvtupxLM" + "ot2378adMN34efjkdeclyzkljkjpijpxhibh4bCF453423lm12NO010aFGadqBdeef236756" + "mnrsGHjpfoot8cfgtunqmnuvlm34nqfovwqBstwxOPuvotsttufopxrCfgCFrCxyghjpFGrs" + "bhrCCF4brC45stwxtuuvvDvwbhDJclvDlmDJvD34rsvwhiijwxIJ23clhixyyz1223344556" + "67788c78bh6756453423_012345689abdfghijklmnopqrstuvwxyzABCDFGHIKMNOP", + "34jkmn23ijtufootfokl56xyeffo4bjppxxydenqzAENzENO67clhiijqBjkadABGHHIzAEN" + "zEbh0ayzxy3423jp8cklpxlm4bgh01wxvwvDhiijjpmnpxxyjkDJLMENjpjkzEOPNOnqJKkl" + "ENzEqByzxyclpx8c78wxOPjkvwuvIJtustxyjpotfofgzEMNijENzEghyzbhfoxywxxyyzAB" + "xywxvwuvtuotfoefdead0a011223344bbhhiijjkkllmmnnqqBABzAstjkfo238cotpxwxrs" + "xyrC01yzvDtuvwzAAByzxypxjpjkkllmmnnqqBABwxpxCFFGwxvwDJvDvwwxpxGHjkij45bh" + "DJ4bbhijjppxwxvwuvtuotfofggh45cleffgJKstfojkotKLfodeadstef56klfo458cghot" + "rs78strCotclfohiij0a01122334455667788cclkljkijhighfgefdersCFFGJKHIrCef89" + "ghrshistijfootjkfotukluvvD8cclklefjk8crsrCCFFGGHHIIJDJvDuvtuijJKhiKLgh", + "wxghadpxjpvwwxtuvwstijnqvDxyvwwxpxvwjpDJIJhiwxuvJKbhFGtuijvwwxxyjk23yzIJ" + "vDKLfgklstghfouv6756zArsABijpxJKxy12LMottuhiclHIIJot8cyzvwijzEwxzAdepx4b" + 
"zE89lmJK45xyfgvwefjppxuvjk4bstxyijbhghklKLENfgmn34fo4bvDclhiij8cdeJK89GH" + "ABHI7834jk01klefIJotstdeNOJKGHlmqBadDJHI2334jpijbhyzOPhiijjkjprszEpxwxGH" + "rCCFrsstdeeffootstrsrCCFFGGHHIIJDJvDvwwxpxjpjkklcl8c78675645342312010aEN" + "ghfgNOKL23vDklgh01jknqABjpLMzArsbhOPpxyz344bbhwxDJhizEfomnxyyzzEENMNLMKL" + "JKDJvDvwfgwxzEuvABvwuvstpxjpijhighfgfoottuuvvwzAjpbh4bxywxvwuvsttufoef78" + "dead4556deeffoottuuvvwwxpxjpjkklcl8c78675645342312010almxyclrsmnnqqBABzA" + "zE", + "rsDJlmghwxzEqBIJjkbhENcl2312yz4bhiturCfgHIvDCFotDJzA01px78vwuvtuvDnqjpst" + "pxfootwxkl67rsNO8cclMNijghDJfgJKjpefxywxpxyzDJxyvwyz45AB8c56uv78vDdejpqB" + "KL8cIJpxjphirCLMijpxwxvw4534bhuvjkDJtuhizEvDij4bstrspxvwrCyzCFJKjpbhpxxy" + "pxkljkjpKLpxOPghyzwxcl8c34vwvDothifo12DJ23fgzEuvlmstvDvw34DJklzAotfoyzwx" + "FGENpx4bJKghefhideijCFjkGHadst0acljp8cpxxyyzzAbhAB23wx89rCdeotmnnqvwrszE" + "yzDJhiuvxytupx12rCqBst8cotvDuvfoCFclklefjknqjpvDpxijottuxyuvjklmyzmnDJJK" + "vDzAlmdezEklENMNNOMNLMFGMNNOKLOPJKGHDJvDuvtuABNOotfoMNadfgotjkstrsrCCFFG" + "CF0arC01rsst0aIJotfofgadghbh_" + "01234567abcdefghijmnopqrstuwxyABCDEFGHIJKLMNOP", + "hiefuvMNENzE8cbhkljklmmnDJ78IJdexyclfoHI34klwxvw8cfgENtuABuv4bot4589fotu" + "ghijjphiclNOyzpxstefvDuvDJtuotvDnqwxfodeJK56rsxy67DJjkklstpx78KLotLMfo01" + "ad0afgJKyzghrCjpKLpxbhwxvwENijzEvDdeadCFwxpxrsyzjkjpOPFGENrCpxDJhiwxCFjk" + "vwJKMNbhfouvtucl01xypxijjpIJpx8c4bwxvwvDuvNObhpxclOPtuHIijotjpstklhitulm" + "DJjkrsjpforCuvpxefijfodemnklwxadvwvDxyCFyzENwxzAxyyzxywx45jppxvwxyzEABlm" + "debhqBuv0apxjpjkkllmmnnqqBABzAyz34zE45ENtuijwx4bNOst23otfoMNfgjkfopx45hi" + "56tuuv4534vDuvtuotFGst56foDJrsfgghhifgstJKKLfootLMfofg4bKLstijjphibhJKpx" + "hi12ij01wx4b0a3401122334jkDJ4b_" + "1356789abcdeghjklmnopqrstuvwxyABCDEFGHIJKLMNOP", + "vDfghi4bjpvwyz89jkwxDJklpx45bhjpuvJK67vw8cKL34lmef23tude4bijwxjk01xyhiwx" + "zEbhotENmnijuvklIJ34JK23vDnqHItuuvqB56ABzA124bIJNOvwstfohiwxOP78pxjpadij" + "otjpefdetuhighfoyzxyuvefrsdepxbh23hijpijhiotzAbhjpGHHIrCvDad4bDJtujkkllm" + "mnnqqBABzAyzxypx3445cl23JKvDKLzELMMNKLvwIJyz0axyNOklENzEyz8c4bxyadde56bh" + "jppxwxst6778vweffovDjpDJNOjkENuvvDpxghvwDJjpjkbhpxwxIJvwkllm4bvDJKDJclvD" + "56uvIJtuGHotOPfoFGfgstGHrsstmnfotu8cHIuvlm45IJvwJKpxjpjkcl56mn67KLLM788c" + "nq56clkl45jkjppxwxKLJKqB34vwDJCFAB23vDMN893401ENrCuvtuCFotfoDJefstdeadzE" + "JK0a45KLzA561267zEENMNLMMNENzEAB_" + "012345679abcdefghjklmnqrstuvwxzABCDEGHIJKLMNOP", + "jpxyjkFGwxkl56rC78GHDJpxzAHI6701vDyzvwxyjpmnpxcl12ABwxvw8cuvzEvwyzENij78" + "hijkNOJKxyOPijhighklzEDJKLjpMNpxyzhixypxijlmjpyzpxxyENmnijhinqzEyzJKIJxy" + "NOpxjpENzEbhLMclKLijuvtuotfofgghhiijjppxwxLMbheffgyzstotfoJKrsrCvDkl4buv" + "px34mnCFvwghhi8cjkMN23jpjkHIpxrCwxxyij78kltupxjkcllmklvwotstjkijhighbhfg" + "ghhiefvDjpijjkhirsbhde4b34IJadfoefdeqBfgrC45klmnbhadghhiijhibh4b45566778" + "8cclklefjpCFfoKLpxxy45bhstotfgfofg0aadghbh4bbhLMFG89tudeghGH01yzCFMNrCuv" + "strsrCCFFGGHHIIJDJvDuvefzAottuENuvvDzEzAnqABmnlmcl8c78ENMNLMKLJKDJvDuvtu" + "otfofgghbh4b455667788ccllmmnnqqBABzAstotfo45stfgghbh34231223344bbhghfgef" + "dead0a0145", + "89wxuvLMbhvwtuMNpx12uvjpjkwxxyvwtuwx4befyzklclxypxzEjpnqyzzAotqBnq8chi45" + "vDNODJfgxyABIJtuuvpxHIbhdeijtughzA4bvwGH56wx78fgbhjp45EN4b67hifoijxyghst" + "zEuvyzxyNOpxjpOPFGbhghefotsttu4bstfgrsENrCgh34rs23hiad4bijsttujkkluvvDlm" + "vwvDDJfoJKvDDJwxfgpxIJKLvwxymnjpLMMNENzEyzxywxvwvDDJJKHIbhghNOde4bbh344b" + "ENbhhifgijbh34pxvw4bzE454bbhefNOhifgzAwxOPijbhjkklcl8cKL4blm3423344bvwcl" + "jpbhpxwxjkghklvDmnDJfgvwnqijclhivDDJefABuvijqBtuotIJ78fodeDJefjpdeefGHfo" + "ottuuvvwwx67vDABvwwxadvwvDDJIJ0a01JKDJvDuvpxtuotfoHIefdeaddeeffoottuuvvD" + 
"DJJKKLjkjpjkpxLMklclwxvw8c89vDIJDJIJ8cvDvwwxpxHIclGHkljpjkjp_" + "1345679abcdefhjklmnopqrstuvwxyzABDEFGHIJKLMNOP", + "4bDJ45nqijghhiGHfgbhijjpjk4bklpxcljphixy34wxyzzEvwij12vDpxjpwxvwxyjk0ahi" + "ENklot56qB01ij23bhIJMNJKlm89pxadyzDJxyABfovD0azAyzhizEFGijwxIJ01jpENHI4b" + "pxDJGHgh45mnxywxyzzEvwuv8cclijtuhifgghijENNOMNLMxyKLjpMNpxxyJKOPDJvDjkIJ" + "uvENyzwxhiDJijklhitustjktughijhiijjppxwxvwuvtuotfofgxy89bh8crspxijsthiij" + "bhfojppx4bbhrsghJKxyyztuuvvwzEwxxyqBABzAyzxywxvwuvtuotfofgghbh4b45566778" + "8ccllmmn5634EN4bbhforCklhistNOENzErsstyzxypxjkrCijhijpotfostefijbh4bhibh" + "34de23454bbhCFghfoadrsrCCFmndersst34otlmtuuvvDfofgghhiijjppxxyyzzEENMNLM" + "KLJKDJvDuvtuefmnABnqmnkl12deMNjk23ijEN344556highfoottuzEuvvDIJDJJKqBABzA" + "zEENMNLMKLJKDJvDuvtuotfofgghhiijjkkllmmn67HI12", + "IJwxtudeklclvwijjkhivDghkllmfgbhijNOHI12jpGHhiijJKMN4bghmnbhnqefENjkpx01" + "adzELMzAfoMN0ayzkl4bjpjkotdeadKLklfg45xywxqB4bfovwuvDJghtuLMhibh56ENotst" + "zEfgrsghfovDOP67rCuv78MNDJvDbh4bIJDJvDefJK45DJABFGdelmpxwxEN56344bijklvw" + "vDbhjpuvDJtuotIJjkkllmfoNOuvmnOPlmijkljkfofgghhiijjkkllmmnnqqBABzAyzxywx" + "vwuvtufostKLmnuvJKvDHIIJzEDJENpxHIjplmvDrsklhiijjppxwxvwuvtuotfofgjkyzst" + "footfoefGHfoCFuvrCJKzAABrsijhiijjkkllmmnnqqBABzAyzxywxvwuvtuotfofgmnghfo" + "CFpxlmotjprCFGMNzECFHIIJDJvDjk455667788cclkljkjppxwxvwvDDJIJHIGHFGCFrCrs" + "stotfofgghbh78cl563445yz233412234b01bhhiijzEjpKLxypxjpijhibh4byz342345zA" + "AB56126778898cclkljkjpzAyzxypxjpjkkllmmnnqqBwx89", + "ghfohi34ijjppxxyfgAB8cghhizAbhijjk78jpeffgghABhi4b23ij01jppxyzxy67jpuvtu" + "otfofgghhiijjppxwxklLMst4556yzMNENzEyzclxyjp120abhhiotstijjppxjkwx4b8c01" + "jpjkyzklJKKLLMMNENzEyzxywxvwvDrscladfoIJjk34wxuv89jpbh234bNOhistdetupxEN" + "wxjpfg0ajkad78bhHIkl4bGHijFGot34vwrCOPwxlm67mnef23yz12zAvDCFjp4bqBABzAyz" + "xypxjpjkkllmmngh01DJdeuvzEyzxymn23jppxbhwxvw4bvDDJIJfoNO458c5634otOP4556" + "jpijjkhiHI4bij67bhDJ4bfgjkkl78wxfoghjk34zEhi23yz12tuzEvDDJJKKLLMMNENzEyz" + "xywxwxIJpxtuotfofgghhiijjppxwxvw233445DJhijk56foklHI67lmkltuvDwxjkpxotfo" + "fgghhiijjppxwxvwuvhighhiijjkklcl8c78675645342312010aaddeeffgklDJJKjkGHvD" + "DJKLvD89JKyzjpHIIJFGHIpxzApxjpjkkllmmnnqqBABzAyzJKKLLMCFstMN", + }; + + solutions["Small random tree 1"] = { + "12:34::", "141301", + "0214_01", "0214130113", + "130114_134", "1302140113", + "01130201_02", "01130201_123", + "01140201_134", "130114_01234", + "141301_01234", "01131402_0124", + "13011401_0123", "13140102_0123", + "0114020113_023", "13011401_01234", + "14011301_01234", "011301020114_134", + "0113020102_01234", "011402011302_1234", + "020113141302_1234", "0114130201130102_0123", + }; + + solutions["Small random tree 2"] = { + "1:234::", "01121413", "12131412", "12141301", + "14011201", "121401_02", "0112131413", "011214_034", + "1214011301", "141312_024", "011213_1234", "011413_0234", + "01131412_134", "01131413_013", "12011301_012", "131214_01234", + "131401_01234", "131412_01234", "140112_01234", "14011413_034", + "14120113_014", "141201_01234", "13141201_0234", "13141214_0134", + "12011401_01234", "1214131201_0234", "1412011301_0124", + }; + + solutions["Medium random tree 1"] = { + "12:456:3::::78:9:", + "68160102_48", + "010215231667_15", + "026801160167_28", + "1502231601166716_02458", + "151416230201166816_345", + "230223011615671679_235", + "6801166716791516_16789", + "23010279671667790116_039", + "0123022314166779_012345679", + "23681401166701026801_02468", + "1401021516681523677901160167", + "011615681416146779_0123456789", + "010216150179236716020102681614", + "160279016723022301161468151615", + "140116687902012302162367_134689", + 
"156701021679230102231468_023579", + "670216687914166779011516_056789", + "6716791567140116670214_12456789", + "022368160102681614010214_0123458", + "15160167160267231468790167167968", + "16236714790116011567026801161502", + "0201792302672316141514_0123456789", + "1601156716020168141623790102016714", + "161467010216230115681668_012346789", + "1614670116670268796715011602236815", + "7916672314160215016779671668_12349", + "010216150167230223161401_0123456789", + "0102152301677967160201026814_0234567", + "0102160123671602011568167914_1245679", + "67140216680116147967682302_0123456789", + "010215672368166701027967162301_0123567", + "161523016802671679016716230168_01345689", + "23166802010279166714167901236814_134689", + "1601026716236715796801160267791401166801", + "167901681614020167162302681523_012345789", + "67147916676802230115160201230201_12345789", + "0216156701230223146867796701166716_02356789", + "67791602146701142302231516016779671615_012345789", + }; + + solutions["Medium random tree 2"] = { + "123:::45::678:9::", + "575834_47", + "34030157_047", + "3558353435_58", + "350335025669_05", + "01033558575669_17", + "01346956035735_149", + "35033556353435_356", + "0235010301355835_123", + "0135560302356957583435", + "033435025756693501033558", + "570135340302585669355635", + "56585735033403020334_03467", + "35560335010269565734_013478", + "0201033503566957343556_01457", + "3401695669350335583534_13459", + "57020301355658033435_0125678", + "350203015658695657_0123456789", + "350356585669563558340301_03458", + "0158026957566903343534_02345689", + "03566958573557030201_0123456789", + "035835023403566956573557_024567", + "343558030156350369025603_014569", + "0201345758036956576934_123456789", + "03355801693403575635560203346902", + "3558566957030134033556026903350234", + "356957345635560103346901_012345689", + "02560369355758563435035669_01256789", + "3501563457355603346956580257_1245679", + "56690258573503345635566901_023456789", + "02033534015658350357350356_0123456789", + "343503583501020302573534566957_024578", + "560203340235586957563503010335_0234679", + "5635695603350103580135573469_123456789", + "69033501035801355734355669023503350257", + "3557340356356902035635035602_0123456789", + "3556580334350103573534566956350203023556", + "35566903350257033556356901030135_0123569", + "35563403355602576956033503695802_01256789", + "5635030103350302586903565735690301_01256789", + "5635690334025603350169035835030158_0123456789", + }; + + solutions["Large random tree 1"] = { + "1:2345:de::6::789::abc:hijk:fg::opq::::::t::lmn::::rs::::", + "kn149h69469k699h688b46_1hn", + "2e671214699i46kn689k9h8c_7en", + "kl9k9jcq2e8c684614122e14_elq", + "1514468b6801461214132e8cco_05b", + "co8c68kn699i9k8a46km699j679k69688aaf8a_46imno", + "9k67cp8c696867159i9h14it46co8c68or8aag6968cq8c_578hkp", + "os8c2d9jco6869468c129k8a8b149h46km696701af9i8ccq_bcdhjs", + "9j9kcp8c68cqit9i69682eos8c12cokn9k8c681469or469iit699j_ejpqst", + "9i2d120114it4669139kcp8ckm68698c9k68kncoos9i9k9i8ccp9kit_04diknpst", + "68kl2d9hit9i468a9j126869148aag468a9k14680115698c68cq8aaf8a_0468dhijl", + "os6912co688c8a67kl9i69142dco9j688c9k6915679h46146946699horag_2579dhils", + "it9j8akm69469k13681401698c8akn9iagco9j9k68468cor68af69co8c9i8aos684668_" + "34acfjmnt", + "6946129iafkm142e68018aor1368128c9kco8b6946689hag9j678a8c14688c8b462d1369" + "9i68128cco8c68it159ior_234679bcdefijmr", + "oraf468a159h9k149iag1269knkm1468it9j694668149j8ckl8a9i6869co019k1468698a" + "6869af9i46688ccoos6846co14_024579fghijlmot", + 
"cq8ait149iag468c691446co129h699k1446688c0114152d46cqkm2e8a8b69149j124668" + "69af149j46149k688a68af69688ccoorco_1268abefhjkmoqt", + "691246kl142d9j0168698cco8aor689h69689kit8aagcq8c68461468122e8c6869os469i" + "14it69689hcp8c6846co699kkn9k6967688c68_024678adhijlqrt", + "8cco12682d8c698a689k69klcoos4614afco9i158cit9k6846coor141201co8a699i6867" + "1369469j14cp8cag696846149k8akn9k15699h_23456789abcdfgilop", + "8a688cco69or9kcpknaf68138a468c682e12co14os699h8c2e6867km8aaf8aco9i689k8b" + "689jcq8c699i68it46149i699k0115km1446148aag8a_345679abcefhjkmpqr", + "9icq688b2d4669688a8ckm469k671468coorit8c69klcp139h68699iit4669149k8c46co" + "8akn016814af8a158c68agos69co9k46149j6967468a6846os8c1412682e144668148a0" + "1", + "8c469h688cor46klco699i9h8c9k14460112oskm1469co469k6812138c1415cqkm462d14" + "os9j122d1469689k8a6946af8c6846ag148aag1268462ecp128c6867_" + "1234abcdghijlmpqrs", + "699i699h67688cit9i8a1469469k8c1469158bklco8cagor9k126801co2e8b8ckn68cp12" + "9j6914469k2d141314af6968468c8a6846os149hcocq8ccp8c01_" + "01234689abceghijklnopqrt", + "69469h1468699k46cocq8cor8b68158a69km9j14670169af9h9i682d691246co149ios2e" + "9kit8a68699kkl019i138ckn688a461469129kcpkn688c68ag461413699iit8aco688c2d" + "46688a", + "cp0169682d46149h8c1346696812469k14cqitkl15co9k2e8ckl68698aos9kkm4614co12" + "689k8aaf2e14469i148a9kkl9k69688c8a9i12it9j699j688c8b68coorco_" + "0589abdefghjlopqst", + "68468a1412af688c2ecq138a699h679k6846af14128c68cp69kl9k468c2d140169coor8b" + "9i466968oskm8cco14469k9i14kn122dit149j69km689i4614ag8a6846ag158c14466901" + "68698b9j", + "cp9i1469468c8a149j68or12cq8a2d142e699h67699k4669kmitaf1413468b1514468c68" + "01co12or69os14468c9k69689kkl9i699kkn8a688bagco699i8a68itos9k699k46148c12" + "8aag2dkn", + "698c468acpafco146846129i159kkn2e8c14or68698a9k1301ag4614it6769os8bco8c12" + "co688b6846km14or469h129j9k2d12699h469i9jkl1446129k69469k_" + "0124568abcdeghjklnopqrst", + "8a01ag6869148c9k124613itkn8bco14152dkm8c6867692e9h8b8a684669or9icoos8ccp" + "68128c9k6968149jco694614128c8a9jcoaf68or8a6867af2e_" + "0123456789abcdefghijklmnopqrst", + "8c681412co698b8c682d9i46691415cp8a68699h12462e69ag9j8a688c466914it466769" + "os0114or689k12469icpit9k6968co2d148cafkm8a469k68kn46km8a_" + "01245689acdefghijkmopqrt", + "8aag01689i8c8a69co689horit6946af8cco8a9kkmaf6914688b9j9i15os9k46itkl1468" + "8c46686946co128b2e148c9k4668kn69138c68cp9k8ccq68148c4669146768129k461446" + "2d122d138c", + "6814699jcq8c68co139k8c9h6968cp46klkm8c9k67kn14688b69689i466901it14469ior" + "679kit8a01co1215kl6914af8c4612or14os682d12142dco8a4668148a8cos2e122e14ag" + "468a688c699hag", + "co13699k688bkl469i8cco68144615148aafor69129k2d468a8cit68cq69km01kn9kos8c" + "148a9jcoklos9haf46699iagcp46128a2e8ccp128aafag9kkn9k142e4667699i122e12_" + "23456789abefghilnopst", + "6968af9h698a678c9j68co8c9kor46cp68km14699i122e4669kl14af469hit688cag68cq" + "129k699i1446699k01688c142d8b12kn46682d018aco46681514ag1546688b_" + "0123456789abcdefghijklmnopqrst", + "1468468b67699i68698c13co8bit6701148c9h684612144668698c12knor2d68co8aaf15" + "8a68698c149j689i6968kl468c68cq9kcpos8c686968klco8aag8a6846142e1201152e_" + "012456789abcdehijlmnoprt", + "689k4614698c469k9i68km8ckl01cq8cit141569469k68699j8a8c6869coaf12os9i8a68" + "67af14ag8c8akl68agco699h462dor8b1469139k8c9hcp46144668462e12kn1446122d12" + "01_012478abcdfghijkloqst", + "1469159iit688c4669680114co9k8ccokl67136869oscp8b9k688a14kn8c46co68km8c14" + "orcoor69cpcq688c12cq461446699haf9i68122d129j69688a9j689k691446688a14ag12" + "8a6846142e121446688a2e9kaf", + "68kl46698c9k2dcq8b688a8c144668128ait14km01co8c6846co6814orag8a68co8caf69" + 
"469j9i14122d1269159h4614466812462e8ccpco121413os14466768co8c6846co141501" + "or46_0134578abcdghjlmoqrst", + "691468itco9i699k8corkn684614cq8a699j15122e8c46144668cp8a9k2daf461314km8c" + "466769it67kl9k13689h69688c8aco8c689iosco69688c8aagcp8a684614011446688a_" + "123456789abcdefghjklnopqrst", + "9h689i8a6946148b68678c0168co9k9h694614kl8c15orco9k68or46km8c699k1469469i" + "12kn2e689k1469128ccq688bit682d12468c6814129j8a2d46af8a9i69149k8c122d1268" + "8aag148a68_012345678bcdehikmnqrt", + "9k1446691446kl129k14461368km8a9i9h14af69688c9kkmcq462eag9iit1401149ikn69" + "9k688c67469j698aklcpco1415kn46or148cco469koskn9k9j699j6968121446688ccp46" + "1412_01234568abcdefghijklmnopqrs", + "9k8c9icp14co68os8c8ait69km12cq9k6769kl9j4614coos139kkn699i4668149h8c1246" + "af682e144615699kkn8b8aaf68148c1246agco2d126901or14co8c688b8a68468aag1401" + "1446_012356789abcefghijklmnopqst", + "698a46689k46146701138b8akl8c684614af8a6846158c12142dcq68af8c68cp12692dco" + "9h462e699j9kkl14os8c688ccoor12km698c9h012e688c9i9k699ikn9k46co67it9i8c69" + "68699kkm8c_0123456789abcdefghijklmnopqrst", + "678cco144669688b8a12699k2e1413ag6869688a4668kl9i9j6946af8c14it68469iit69" + "9k688ccq8a69688ckm14460114co46121468os2dorco698a8c469k69af9k688c69kn9k69" + "46co141501kn_0123456789abcdefghijklmnopqrst", + "67144669689i128c1446151446co688b692d12682d2e149h698aaf689k698cor6746co9j" + "itkl9i696846149j128a2e8ccq1468kn46ag9k688c1469cp13or6869019kkm9k69148c68" + "46698a9k14kl01_0123456789abcdefghijklmnopqrst", + "689kkl46698a8c67149kknco688b69136846148a9kkm69kl129i8c46692d14it46af699j" + "68678c01cq8a69144614699i681214699h46682e148c12co69461514os014669cocp6846" + "co14151446or68co8c8aco_012456789abcdfghijklmnoqrst", + }; + + solutions["Large random tree 2"] = { + "123:789a:fg:456::bcde:hij::::rst:::klm::q::::::nop:::::::", + "170103355b_7ck", + "lodk5d35dllnlp03015d1a3536_kno", + "5b17as350103355d1adm02012g02_7bs", + "arfq17012f031a1802366h03355ddllo_7qr", + "19arfq0118031a35175ddl2f02033503_5789qr", + "022g03dmas0219015d3503195d011a0135at_279gms", + "35as18020134ln195e03021a17366hdl5d5b_2348ns", + "as1aarlpfq18dlln1a6j365ddl352f5d5c0301035d02011a01ar_jnpqrs", + "011a0218ar2g01031a3501175eat03025d35dl1aln5basar015e0301355e35_" + "0278aegrt", + "35atdl015d0334lpdl2g1a02366h03355das03dl355b34351a1901366ilndl1a35at_" + "134bglpst", + "dl5d35dk366j5d6i34366hlodl033602ln03011aarfq355d2ffq35033501dl1adm020118" + "at_06hiklnoq", + "6h36fq18dl2f6jar016h355dlo1a34030135dlas035d1836dl02352f1a366i3603366jlo" + "0136022g_48hjloqrs", + "025d2f013503lp5d180135031a5b02at352f035e2g0135dmdl5c173603022f011a2g6h36" + "fq35dk035ddldk01351a6jln36at_0123678adfghkpt", + "010203dk2g01341aln3635015ddlas6j36355d035e02dl5c2f2g35fq36lpar5b6i36dl03" + "351a5ddmdl0335loln5b01022g020301_13456abcgjknopr", + "036hlo352f02fqdk5ddl2f0335015b365d03at6h35fq185c6ilpdl1a01dm1aas03013503" + "175d3435011a020336025b6i2f03fq01192f_0245bcdfhikmoqt", + "5d5b18ar03dl3503ln011a5elp0335as0103172f02dk5d350301dl2f1a5dat022g5b0201" + "5c35365ddl6ilpdl5d355c031a350103366j_025678bceflprst", + "dl01355e17031801355d03lp1a36020301dmfq355ddl6harat2g02011a2g5c0301as196i" + "0301ln3436355b6j032f6h022f5ddkdl355ddl35lo03011a18lnas", + "0102at2g6i03dllodl17ln5d5e181a01as35dl03dk025d2f34lpfq01dllndm3603010335" + "1a035d01dkat03355dardk035d191a01180319355c36_01234589deiklmnrst", + "1aarlnas01355c0302dl19181a2f2g355d36lpdl6j01at6i020335175d01lpdl2f03dm35" + "195dln0103fq2f36dm025e1a6i2g011a355d035b355ddl5d35_1356789abghijmnpst", + "2fdm03351a5b0103ln35dl5dar0203lo34dl191a0301ln03355d0318fq015e031alodm36" + 
"atas2f03022f01dk036i171afq01363503365d0135365d2f6j361a_" + "03456abcefikmnoqrs", + "03350102185e5d0335362f6jdk011aar17at5d350301dllofq025d1a013603ar356hdlln" + "19012g365ddm5b6i352f5b03022g345c5d35dl01035dlp01asdl1aaslp010334dm36355c" + "366h", + "1a0118355c02015b6i0335as5ddl36lnlp355d6h36351a17dmfq2g022f6j1936035c3402" + "011a02dl5d6i03ar01atln2ffq1a2f033435010203as35015b2gdm18_" + "4589abcdghijklmnopqst", + "356hdk366jdlln035ddl35fq02lo032g5b355d0201dl032flp35175d1a36dm01as03355d" + "341adkatdl0119351a0301ar355e1a5d5car175d5b0203dl35lo5b3503_" + "0123456789abcdfhjklmnopqrst", + "0135ln18fq012f3602dk03365d2fdk35fq2f0203dm016h6i355d5c1aar5bat3601dl0301" + "343518361alo5d19dmas350103dldk35016j1a3601022g025d03dl3601lo6j1aat1a_" + "125689abdijkmnoqst", + "015d355edk5d3503dllo01ar5b1a0234033401at3602171a0301033503185b5c01192f02" + "03362ffq3503as1a2g6h01033519ar5ddkdmdlln5c365d0235365ddldm036ilp36036j02" + "dl2g5d3635365ddl", + "175ddl0103355c1a01lpas1a030235035d2fdl35ln5d6j01dl03ardm022g35035d195e01" + "1a36345c350301dk366h365d0319lodllo355d34022fdm35dk011aat366j36351a01_" + "0123489acdefgjklmoprs", + "355blp192f35015c03fq356i5e6h36355d0103dm1a013517036j1aat02352g5ddl35ln03" + "5c1a5d355d011adllploas1adk0335dl011a5caras023534035d350301180103355d_" + "134569abcdefghmnopqst", + "5d355b03dm5d012f18dk173502015c03366h1901021aar032g01fq03366i021a35365das" + "0302dl35lnlo0103015d351a5c6jdllolp36ar5e34at2f010301351a02035d5e01dl1903" + "5d355dat03lp022ffq", + "01ln351a0302ar2fdm345c01351a5d36aras03026i36035bdl35015dlolpdk036j6i192g" + "3602356j5d2gdl01031a1801at0334lp351aas03170103355e351734355b5d5b5ddk_" + "012345679abefgijkmnoprst", + "35365b2f5e6j5d6i3503fqdl365c01ln35031734015d1adl35at0301lo022f5ddl18lp35" + "6h015d1902360302352g0102dk5d01dkdm03181a01031a355d0301ar1adm0103dl363503" + "as011a0103356h5dasdlln5c", + "363503at5clo6h353601035ddl3502lp035ddl2ffq01355b02035d36026idm35031a0103" + "2f34ar021a01355d1903356j03360103025e172f35as19365d6jdk5dfq3503011a0103at" + "35_0123569cdefhjklmopqrt", + "36010335as1a02015d0335ar1701dm175e03352ffq18015d2f5b181a03356idm36dl025d" + "352g2flodl5d6h34355d03dk36lpdl5d6ilp3501031a01at1a012g5c3503011aas1a0103" + "35_02345678bcdegijlmnopqrst", + "5ddl35035e355dln01dl19lo02012g0336186j5b355d010336172f0203011aas6i363503" + "015b1a5c35dl6har03lp01345d0335dk1a5ddlat36350103015c5d355ddm1a17366h_" + "0123456789abcdefghijklmnopqrst", + "356i022f1a01021703lp19012g5das35dl365ddklpdl5c351a5d0335ln025car01asat1a" + "03as5eat01dk6h2f366jdm5d343502fq2f18dl02fq035e01180336dm6h355ddm5d173534" + "_0123456789abcdefghijkmopqst", + "as1a5dat01fqdk1903as013635ar5b185d6h03dl351702035d35lo011adl2g36lp6i035d" + "ln350301025bdl36dm6j5c35032f5c5e355d366j35dk5d02342gdl03lndl5d355e011734" + "_0123456789bcdefgijklmnoqrst", + "011a35ar03025b5c5e5d35dl03020134dk2f5d3502012f1703011903dm366hfq5dlo6j01" + "03dllp360118032gdm2f02dl0103355d342g1adl01356i3603ar6i355b01at1aloat18_" + "0123456789abcdefghijklmnopqrst", + "355d350301dl1aar02365b6j0301ln35fq022f5d031adldkat01lo031alp3602356h365b" + "6i5c5e355d035c0136dm355d183503dm0201352g02dl5d35dl0301ln34170319010334_" + "0123456789abcdefghijklmnopqrst", + "5d01ln2f196h18033501dk365d6hdl35035dlo5b1a02dl35032g6ifq02dkar2f5c350301" + "36036j1a5ddm5dat6i02350301fq171alpdl35ar015d35dllp0301355ddk5d3534030117" + "0103_13456789abcdfghijklnpqrt", + "022f0336025d011a036j012g170335dllp5dar02033501dl1alp5d36at18dk5b35lo5d03" + "dl6h01lo18355d03366hln355e5c5b6i1901dlar5d1a1903as3635011a015d02032ffq2f" + "dl02as03366i_0136789abcdeghijklnoprst", + 
"03011aar34355ddk0301023617lo03011a35at5d03dl355d01dm366i3536ln031a6j0136" + "6h355ddl03lp2f5b35035d02352fdl03fq355b2g02ln015c5e2ffq02351803355ddk5c02" + "19182g_0123456789abcdefghijklmnopqrst", + "35345d025edm011a2f03015d3635dl03ar025d2f1adlarlp5b2g36016hdl3503360102fq" + "as036i183501361a03as022g5d355c2f3601dl023503016i2fat1aat186j36355dlndlln" + "5d5b6j_0123456789abcdefghijklmnopqrst", + "5d02012f35033618dk6j5ddllp0135fq175d02dl340335ln015d1aas2g0301355b1adl5c" + "3635025dln5b032f36dm016i03ar5d351aar36036h025d0136loat1adllo5dat03363519" + "03010319355d_0123456789abcdefghijklmnopqrst", + "5d0103dl1a35025d5c012flp6hat341a02030136dlfq03196i0136356j5d36036h35lodl" + "1701ardm1a5e035d02363534lnas010335032f5b01182g015ddk035d35035ddm020335dl" + "2g5ddk3503011a1817_012345789acdefghijklmnpqrst", + "020301341736355e03016j351a365b03355das2f5b020118035c2fdm2g6h353601035d02" + "2g0135dk5d1a6idlar03lp011a035c01fq5d35030134365d19dldk2f030102loatdl1a5d" + "0103at18352f5d36dl_0123456789abcdefghijklmnopqrst", + }; + + solutions["Large random tree 3"] = { + "123:ab:9:4:567::8:t::efg:h:cd:jklm:::qrs:i:nop:::::::::::", + "fqhphnfrckcj_kpq", + "fs019f0229029f03fr3445_1hs", + "031b1a9g29022903349f46fr68_3bg", + "68461b0103340301cl46bc681bbccm_8bl", + "hp7tcl46bcah3403011bbchncjah1a011aah02_67ilnp", + "cj68hn469f1bfr01299f3403bc1b019g02291b9fbcfr_8bfjnr", + "34039f0229fs021b451abc9f01fq9g1bbd34ah1b0302299e_45bcfs", + "7t4734hp4503011a3403ahhofq1a469f02340329fs9ffr011b02290102bd1bbc011bck9g" + "bc_56pqst", + "036801cjfq349f1b45bc02cl29019e021b03291a3401021b45ahbc1b01hncm03021aahhp" + "_23458bejq", + "9ecl7t349f4703021bbc3429466801bd03fs9f1b0102bc451bcjbc013403341a01clahhp" + "_0245beflt", + "fq9gah1b68460301bd29bc349f020129461bbc1acl03fr9f020368ahfs0129bc34029fhn" + "294502_08bcdghqr", + "031a011a34ah4502hofs9ggi03299e0201290229033403469e1a9f9gah021b29bc02cm68" + "9ffs011bbd_345aeghis", + "45ah299ffq9g021acjho2901bc02039g1b344702gi9g299f022902fr299f0129021a2901" + "bc1bckah01hn1a03019ffq1a022902bd_0135679cfhjmoqr", + "bc347tgifr1b9g1a1b45clhp4603bd1b68ah46bc029e9fcm9ggi011b29bc020134030147" + "ck341bbc68011a471b0334cj7t450301bc0334bd0347_2458abcdiklmprt", + "1aahclbccj291b9f01033446fsbc0268291acjhn1b03cm9fah020134fqhobc03hn472934" + "039gclgi019f1b02011aah03344634030102299f020102_02469afhjlmnoqs", + "fr9g451b01029ebdgi1b01ck039gcj2934bc1aah1bck469f02fq0302hp29ahgi9fcm01hp" + "02bc29011a34021b010229fr03bc1b01cmbc1b03cl349f29cj_01234569bdgijkmpqr", + "ck1bhnbccj1a9g011b02giah2901027t9g1b47bc1afr1b019e9fbcho1b1aclahfsbc03cm" + "bc291a340102461a039g68hogi9g293402469f299f030203fs299f2934_" + "03567abgijklnoqrst", + "1b469ebd011bahhn1aahhpbccl03bccj29014503341a0301ah1a0203344729ahhn9f34ho" + "03gi2901frah021b017tbccm9g1a2902019g1b299fbc02011b1a29029ggi9g29bc9ffs9f" + "_045bdefhijlmnop", + "0103cl9g02bc2934ck1b4703gi34ho0102ah0334299fbc46451b291a013403bcck1bahbc" + "cjbchnfs1b01022934479e7thp9f1aah291a0103hp34fq0302011b29bccmbc471b9g9f01" + "_0234579abefgiklops", + "1a2934030102ah9f46ho34fq032947gi0234011b039g297t9e0229bc030102cm9fgifrcj" + "4534290301681aah029f01477tfs34hn1a1b45bc4603clahhn341a1b4601hpahbd68hp02" + "031bfq01bc03291b02ck011bbc02", + "030134021b45bccm0129039f1b01fqbd1a021b01bc299g9fcl02031b34ah46ho031a01bc" + "ckcjah031agi34461bfqbc9gfr4703ah29fs0268hn019e29029f2903029fah1a34ahhp03" + "ahfr47347t47cj340301021b1aahhn", + "ahbc0234684729019f031ahnahfr011a023403299f0246019e342947hp7t469g9e02011b" + "bcahho4703cl29fq9f29ckfr020301hp3403cj46ck6801bdcj1b0102bccjfq47299e2902" + 
"01bcbd1a1bbd1b_1245789bcdefhjknpqrst", + "46039f9g0134681abcckahfq0301hpbd1b1a024634gi2947ah7t9e011ahn03013402bc29" + "9f1bah9ghocmbc1a1b010229clbdah9f1b03bc1bfr9fcj010229bc1b03bc1a9f01gi34fq" + "fs03453403029f29029f470334fs03011aahhn", + "9f011a0229031bfs34bc9g7t47011bbc7tgi9fclck0203ahbd0201293402011b0301bc03" + "fq9f1bcjbc2947349g1a0129022901gicm9f1bahhpfs46ah9f1a1bbdbc681bahhoah1a34" + "1bbc03011bbd1b0103_012346abcdefghijkmqst", + "ho9f0229030102fs1bbc29349f0345gi3401cm47fr1a7t03ah01341a29ah034602019g29" + "1b02bccjcm01hn477thp1b9g9f689e2902bcgi9g01299ffrcl0203bc293403019f4503ah" + "fq1abd01ah9f290201hp_01234579abcdfhimoprst", + "1bbc011a03ah3401ck1a46031b0134bd0229bchp9g1a0334ah1a0145031b340301hnbc47" + "02342903cjbc1b3468029f0346gi34fr03297t9f470168ah021a9g34gifq0334ahfs9f01" + "291b9ebc0201cl291ahocmah1abc01fsho021b011bbc", + "020103299f1b02012946ho34bdbc461b02gi1a9fahckfsbc03470168341ahnah03011a29" + "349f479g29027t01fq29hnhp9ffrah299fhp021a1b299e019f46bc34020329cjcl344701" + "bc1b01bc9ffr29cm4602_012346789abcdfijklnopqrs", + "1aah03hp341bahcj1afq46bdbcah1bhncm0102032968ahbcckcj1b9f0234030134477t03" + "bc1a34ah0302cj29ho01fs9fbc03fq45340302ah29034668469g34030229fs011a021bah" + "01bc02hncl9ebc29021b0102_2346789abceghjmnopqrs", + "9g29011b9ffr03bc34010203gi1a1b9gck29cj01ahfs1a9f03020102fq1bbd2934hn9e02" + "ah1b0147037t34ho47031b46bc011b1aah68cl1abc1b01hpah0334bc463403011aah1a01" + "02299f2902033403022945_0134789abcdefghijlopqrst", + "9f031b46bc7tcmfs01340302681b460129ckbd9f340368471a34fq9e9f0102ah1a1b45fr" + "29hnah1a1bho01hp9fahbc1bcjbcfr01021a011a9g291b0302ahhp34ah03291a9ggi1b9g" + "29bcclbc1b0102011bbc_0123456789abcdfghjklmnpqrst", + "bc1bbd294634cl9f7t68fs019e02461b290103341a9ebc02hp9gck0103ah1a3401hn4703" + "3446ah031a01021b29bchocj9ffq01frhn0229011b9g45029f013403341aahhpbcclcmbc" + "47ah1a1b7t01022902011bbd010229_245689cdegklmnprst", + "01349ffq1b29bc029f2902011a1bfrcm9fahhn1abdfs029e29ahhnho029e01461b03bc34" + "1acl03011b479g1abcahhp027t34291b020301341b9g1agibcck1bbc1b4701037t4534cj" + "bc03011b1a45bccjahho_0123456789abcdefghijklmnopqrst", + "9f1b341a29024501fr9e2902ahbd9g291b031aho34ahbc9f1b460145fscjcmbc1b0229cj" + "019e68471aclbcah03hp1b02039fah291a9f34474602cl7t03013403ahfq34477t029f46" + "2968hnah021a011aah02299fhn_0123456789abcdefghijklmnopqrst", + "01471a027tfqah0301299g34469f0334681aah0147021b01hoahbc291a02fr0301bd3445" + "cl1b1agi9fahhnckfscjah022946bc9f021a1b03290201341abc032934fqah9e477t0229" + "cj02hp9f9g2902ah011a014702ahhp299e_01256789abceghijlnopqrst", + "34fq9f290201fr9f1a29ah02019ehn1a29029g0301fsgi299g1bgi0229ah01hpbc9f3403" + "4734bd03cmbc1b011aah02cjbc1b46cj0334hnckhobccl030103453468032902011bbc1a" + "ck299e1bahho0103463403011a1b46bd_0123456789abcdefghijklmnopqrst", + "46hn01cj02ah1b29019eck02cmbc291bbd9g1a0102ahcjhocl341a29ah1a01031b01hn02" + "29bc9ffs9e3447cl1b9f01342903340145341bbcfq1b01036801341ahpah1a01hp027t29" + "9f01fr9g1bgibccm9gbc1b9f29010203293403_02356789abdefghjlmnopqrs", + "bc1a299e01ah03341a1b4668ck0102ah1ahnahhobc1a01ah1bcm1a030201bc29349f0229" + "cl0347fs019ffq021b0134bc03451b0201cj340329029g2902bd019f347t1b4701bdfr46" + "2902297t9g01bc1bbc9f342901030134461aah02gihp01ah1aah0102299ffqho", + "021a34034729019f1a1b02fr34ho01299e037tah021bbc2934459fcj01fqbd341ahnah1a" + "011b020302hn0129340203469f45fs473468470301291bbcclhpcm021bah011b1a03bc46" + "3446ck0301681bbc1abd02cjah299g9ehp_0123456789abcdefghijklmnopqrst", + "0302bccl9f3429039f011b34fr02453401bc1bclbdfs299g9fgi2902ck9g01029e1b2902" + 
"010334bc9g1a46ah01031b684734fqcjgibd03bc011b46bccm9fho34291a03bc1bbc1ack" + "01ahhp1a0103ah02341a014702cj299fho1ahp_0123456789acdfhijklmnopqrst", + "ah01299fcjho29029g01031a2902fsfqahhn019e9f291b9gfr039fgi0201341a460334ah" + "03011bbd471a0334031b02bc7t29019f03ckhpcl453403019g47fr021b45cm0102fqbc29" + "9g1bbdcm0201gi1a02ahhp1a010334453403011a_0123456789bcdfghijklmnopqrs", + "9g2902039e293446014503gi341bbdbc011b03bc0102cj471bbc1b290168cl34031b9ebc" + "479fck021b29bc460234011a9gahhp9ffr039f0229029f7t477t3403011b1a46fsahbccm" + "01bc029f2902019f1bfqbc01029f299f02011aahckho_" + "0123456789abcdefghijklmnopqrst", + "03ck291a9eah01033446021bbc01030234cj1b29039fbd01hpfq1b0229bdbc9g019e1bah" + "0345bc1aah6834hnahcm9f4701cl03bc29011a020102ah29gi9g3403291b0229gibc03cj" + "3446019f68fs9f340334472901027t1ahoahho1a0102299f_" + "0123456789abcdefghijklmnopqrst", + "bc9f1b01021acl012902bc1bfqck29011a9gahbd031b0129ho341aah1agi0229hn039f46" + "29fs3447030134030234684501ah1a299f29013403017t02ahhp1afrah1a34471b01bc1b" + "017t0229cjbc9g2946cj02bd34033402299g46gi9fcmbc291b02011b02299fbccmfq_" + "0123456789abcdefghijklmnopqrst", + "34469g9f0334cm0268gi29fq0103341a45bc029fah29011b34bdhn0334fr47013403341b" + "017t034734031abccj01ah46ho02299e341b011a03bccl1bbc9g3429021a1b011bgi0245" + "29ahckbchp1acmck9f0103fr344529340302011bbd01022901fs9f1a29fsahhp021a0102" + "291a9g_012345678adefghijklmnopqrst", + }; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp new file mode 100644 index 0000000000..6b9222a971 --- /dev/null +++ b/tket/tests/TokenSwapping/Data/FixedCompleteSolutions.hpp @@ -0,0 +1,80 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** These store complete solutions to fixed token swapping problems. + * This is similar to FixedSwapSequences. + * However, it is different in several ways: + * + * (1) The solutions have not been processed further (thus, we + * do not expect the swap sequences to be irreducible). + * In particular, we did not relabel the vertices of the solutions, + * so they will not be contiguous. + * + * (2) The full set of edges passed into the original solver is preserved + * (thus, we expect more variety in possible solutions; there may be + * more shortcuts making use of different edges). + * In particular, all architectures are connected, so there should be + * NO errors when running our TSA. + * + * (3) Several real architectures are included. + * + * I have tried to include a reasonable range of architectures + * and problem sizes. + * + * Thus, this allows a direct comparison between our TSA + * and the one used to generate these solutions, and hopefully will show + * improvements more clearly over time. + * These are also hopefully more realistic problems. 
+ * However, as noted also in FixedSwapSequences, we must remember that:
+ *
+ * (a) relabelling vertices will, in most cases, give different solutions
+ * [even though the problems are "isomorphic"]; this is just an unavoidable
+ * consequence of the token swapping problem being hard and, presumably,
+ * often having many "nonisomorphic" optimal solutions [although this hasn't
+ * been precisely defined]. Thus, we can never REALLY do a direct comparison
+ * because we're always going to get small differences just by "chance",
+ * depending upon our vertex labelling;
+ *
+ * (b) Many algorithms involve an RNG and hence do not give the same solution
+ * each time (although our TSAs are careful always to reset the RNG seed,
+ * so they should actually be deterministic).
+ */
+struct FixedCompleteSolutions {
+  // KEY: the architecture name
+  // VALUE: the problems, encoded as strings; the first element
+  // encodes the complete collection of edges (which cannot be deduced from the
+  // solution swaps because, of course, some edges might be unused). The
+  // remaining elements are the calculated solutions to actual problems, with
+  // the same encoding as in FixedSwapSequences. Thus the tokens are given, but
+  // the vertex mapping is not, since it can be deduced from the swaps as
+  // usual.
+  std::map<std::string, std::vector<std::string>> solutions;
+
+  // Fill in all the problem data upon construction.
+  FixedCompleteSolutions();
+};
+
+} // namespace tests
+} // namespace tsa_internal
+} // namespace tket
diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp
new file mode 100644
index 0000000000..87ef4a4953
--- /dev/null
+++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.cpp
@@ -0,0 +1,2211 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
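For orientation, here is a minimal sketch of how a test might walk the FixedCompleteSolutions fixture declared in the header above, assuming only its public "solutions" map (architecture name mapped to the edge encoding followed by the encoded solutions). The function name check_fixture_shape is hypothetical and not part of the tket test suite; it is not contained in this patch.

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

#include "FixedCompleteSolutions.hpp"

// Hypothetical helper (illustrative only): checks that every architecture
// entry has the expected shape described in FixedCompleteSolutions.hpp,
// i.e. the edge encoding first, then at least one encoded solution.
void check_fixture_shape() {
  const tket::tsa_internal::tests::FixedCompleteSolutions fixed_solutions;
  for (const auto& entry : fixed_solutions.solutions) {
    const std::string& architecture_name = entry.first;
    const std::vector<std::string>& encoded_problems = entry.second;
    assert(!architecture_name.empty());
    // First string: the complete edge set of the architecture.
    // Remaining strings: one encoded solution each, as in FixedSwapSequences.
    assert(encoded_problems.size() >= 2);
    for (std::size_t ii = 0; ii < encoded_problems.size(); ++ii) {
      assert(!encoded_problems[ii].empty());
    }
  }
}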
+ +#include "FixedSwapSequences.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +FixedSwapSequences::FixedSwapSequences() + : + + full{ + "01120123", + "01120130", + "01122301", + "01123401", + "01201301", + "01201334", + "01231201", + "01231224", + "01233001", + "0112012334", + "0112030103", + "0112300130", + "0112340130", + "0120032042", + "0120034220", + "0120130114", + "0123040112", + "0123120124", + "0123201445", + "0123300123", + "011230011224", + "011234231201", + "011234233401", + "012032042004", + "012304300423", + "012340254226", + "01023413453445", + "01122340120112", + "01123001450406", + "01201301141320", + "01201342012013", + "01203423203405", + "01233412231201", + "01234542466704", + "0112345001123550", + "0120130120134245", + "0120133401130120", + "0120341532012001", + "0123200456072582", + "0123344556784997", + "0123424526784704", + "011203045001033603", + "011223141215122312", + "012013340113012034", + "012013454245012001", + "012032142001201432", + "012320014514014520", + "01023224536553325347", + "01120345240203634787", + "01200123423523424642", + "01203204252607323832", + "01203405263234206728", + "01231234230112050112", + "01233405617658477661", + "01234254506736234254", + "0102341301451334451301", + "0112030104011501601768", + "0112131224120501160105", + "0120132456050156200557", + "0120234514200114452023", + "0120341532012001153234", + "0123400546244740784601", + "012343563567563578899a", + "0123450226246726020102", + "0123454016014017022301", + "0123456772678459453884", + "010234133445340113344501", + "011201234012014540450112", + "011203450116404501173812", + "011223456758398aa6268a58", + "012013243520130113202435", + "01123453626740013485405329", + "01200304205220565227527803", + "01233001140125302325300114", + "01234564701357012823452839", + "0120324514566745012032785689", + "0123124501653670484565362359", + "01231456478978756a5639b395cb", + "0123241456457572803901803114", + "012345167897070a9b34c2d83e49", + "01020134256271839ab889c6d64601", + "010234015647891895016356344789", + "012330014534062730063823344534", + "012334455062233445017819717886", + "01233456378598a5528bc5d22352ed", + "012345627037898070236237899445", + "0112013453267890a0bc4a34dc538553", + "01203452466573347389977380205234", + "012131456708609434a7b0c54cd908ed", + "01233451463472238395a26934462334", + "01234514607801598060a660ba29a6c6", + "01234564375896451264809645015845", + "012345678479a6b33c67871b84bde79b", + "0112340156702812016356737001129556", + "0112345367859649537134679649538501", + "0120324567860158159aab01589a862067", + "0120345245166789017834201689011667", + "0121304567895a8bb60167307c14457567", + "012314523678233973a818b7170117c5d0", + "0123245615788056396356150180784739", + "012345465789a0b37b5c13c8d2ed188f18", + "012345561781691a3001b530cdbccd63b5", + "012345677481983aa95269b6673a0b1c23", + "0123456786945ab282c8bd2ea205f6g52h", + "010231014524450256452473310102455618", + "01120301451265768934952a62b00996b09c", + "011230450167855934ab72c6bcaba3344585", + "012101130104567895677849040182785613", + "012331040104235647622304180104477931", + "012334456057234512018660011223893445", + "012343456237384209a49b4338c41401d0ed", + "01234350360783950652363a06b1c00661b1", + "01234543362723879a7a87b97c9aad9a797c", + "0123456157055890abc405bc1dc4e92490f9", + "012345678347797ab772239c303de1010ffg", + "01020134453463763458453463761701298229", + "0112034345656785198a650bcd8decefe14019", + "01122343506789753a50ab67c6bd1adeabbdc9", + "01203245642016012016786457894578641657", + 
"0123345260235246728495a1608bb7017c84b7", + "0123432546678440960ab460460cd760960171", + "012345462789a4ba2c0d4ce08fega4hgage58g", + "01234567689ab7acd74635eaafgfhi9h4fgfha", + "011213042556708912a701709b7cbd2d12017025", + "0112345605017578799a5a6b3c05cdc006de6bcd", + "0120345672356814560187341449142001144920", + "0123145062730123735062382314466223955038", + "01232403500678910a0bcdeb8fg279bchci050i0", + "0123341523567847788015011592348078239234", + "0123345240652378018917013452237852658934", + "0123405467871867622367870140547918871801", + "0123425430678419234284a3b6bcd2ea7eadea42", + "0123454657809a0ba0ca837d0e45f7ag5f5c78h5", + "010223145672786756599aab909a90ab3c0323cd3d", + "0102324564781659ab0170ca4b285a644564160170", + "0102345676808930a801b5cddeebb5150130a3a834", + "012304356270833904a6b883cbd38e4d4fgf1g64fh", + "012314354670622387146201144662237014870114", + "01232454657883938ab383cdefgeheh56dhf1c8aei", + "01234567389a5245738aa09a2338bc68b296233823", + "01123456702893470185701201472834124701704734", + "012034526270869220a56b52bc4586d8ed866b86f886", + "012034567467834274955667a7abc5d856ed6782a7d8", + "0123145301364768193136a6ab536447a619c3366dc3", + "012314567180011229a1b9acdbbe58fg1gbfdhb6ie7d", + "012314567587759ab94b6356b9631475a887a80120c2", + "0123421453678023018039421475234214a25a01ab14", + "012345652718895945a1b41c142cb48dc81c9d892c27", + "01234565473870451259477045477023017047458638", + "0123456712489a8916679001907ab5ab7a671223b5c6", + "0112230445565778955a4b45579c7de41b954bf4477de4", + "012034042056178291254aba3b5c59411da74a417e826d", + "012123400560705805019a5bc621d2155e210fdg059dd2", + "0123124035562374014089013512231235685689013540", + "012340125606789a398b23bc56c5bc8b566d782cc5e756", + "0112313415261278591a300b3031155972121ac62cadbe6f", + "01203104456417317879476479203a23b66c4db620044d3a", + "012334562347120134857923603447563412011260233456", + "01234205567689a9a4b5cb9d635638b589635638635638b5", + "012345526445676489a8bacd209389ea3c231e3c011ecd20", + "01234567890a02b0c42d6eafd57gh867834506diaddii606", + "01123024011224304563574501863024630112302401864530", + "01123415617186961abc7c716196d91a3e93d93ed8bf8bbfd8", + "012132405667895856a767bacda721696ec5056717212005c5", + "0123144235145667018056798a23421480018014428a233556", + "01231445677896679a78015280457814966778800114455223", + "01231456758297490a6297823bcdbc3b537597cd014d141e56", + "01232423516378393ab3cdc7973930ce7f63c79cagce39b37h", + "012334056783935a5bc6d901e87f7030g5988gg5hb2i925h67", + "01233423522365471601783465892352782347342347526578", + "0123345675685648977a753460568b1cbad275c26001d28b97", + "01234156789a36b0674c0112d42336ed01a55e124ca5c00123", + "0123435267879abacb4b242dd9791e02011eec4c2d525f9a50", + "0123453678837964183683a2274579b4c0badb91a223ba1801", + "01234556304514012723456830564527688968561445566889", + "0123455667710180369abc671d71041dd9c81d47718d80bc67", + "0123456738429a8ba26823317350a6688cd738239a42e44223", + "0102341445567348901ab7c43431cd3cecef7e7331b7b9901a02", + "010234534667891578344653673446786789019a78899a786746", + "0112345067014528503412013950348645285034120112288667", + "012013245678933ab815c0757d07207de0eff178930778cee0c2", + "0120341501466772200115587293346746150167349372200115", + "0120344534637617630158203445345863922001761776012092", + "0123034056721867890118893a23723a9bb5233ac5ad3ab52312", + "01230456785952a2b03cd385e5ef590g3cfh2c7a2bb0ih900459", + "0123451260017280579a6b39c87d1201d67d576b23129a45e445", + "0123453416455778236223579001453423166216230134455778", + 
"012345367581911abc9de7fb0235b075e7fbf58ggah81i45911a", + "01123440015312657834792a865340346512014012783453658678", + "01211340520637890ab2c02d1a219efaeg9e2d9ccbgdchb3cb4ccb", + "012313456407589ab669caa0011deaf6g6f56550cah7010701ifh7", + "01231456467081691481a82b352346c2bd6935e89b14a869465770", + "0123435667829a9bb44c2cc0cdebb4fb36bg4243362ha24cb44ceb", + "01234551643784499a49b3283cc9c59de3b25923511fge0171e78h", + "01234560748498ab088c14deaf4a2084agh6e574i7744aag0dde0d", + "01234567780690a7a0147b301c01cd4e78c49f4gcbhc14i8cbb8i8", + "011234145114617678839a4babc3035d656003344ec334cfc334cf14", + "01200345466207382359456a2b59202bbc9d90036bbe3e03466aafab", + "01212304056276047829abc3d04ba9294e4bfb4b5gh274784e05d05g", + "0121324565047859a61bc601d04604455e0f46d06g67a60f2f3278f7", + "01230234356001787395a9abc995544d54951901be60f65b54g4545b", + "0123133456732338989a297b3c8d8e23f23c280ff50f01170102287b", + "0123245240637624381946a77663a701bacb24d55276a77624400146", + "01234256352335176880011756796880566835564a42233556684a80", + "0123451627283942a8bc23d4b82e8fb8d1gb080hbc1ihjgbh11i7ihj", + "01234516375023011645894a9289681601455001451668894a922337", + "01234564783739a6809b5c16c937bd78375c8023e201ed807823377c78", + "0112343567869097671301a1bacdbedfdbbee2a2gf3h35fi866001i1figf", + "0120130120345675137862495601756220011356346256499a1320012013", + "0120300456781983304a20bcde2401efg2f44h242ie5204j012i61bef4ef", + "012134567618395a84bcd234ed21a3d25ab20b3990210b18473984184721", + "0123145062014714859847a850018550147b471485c0d101a8dc3d2385dc", + "01233456789a3523b6c8a7da8178d2eda7d29a6449ce0c2364ce9a35a7e7", + "012342563723378905abb442c45876611de0233723427661055823378723", + "012345361401700145582314451401708258239770019a45140145587097", + "01234536786714452301785289780a014523361467362345527823364501", + "01120130435401261201304330012667125485266712013098438554854398", + "01123043540112302643678501120179302667265412430130435401122667", + "01201342563556748701206956011301422042744201133513562001132042", + "012304567826390abc7de23feggf2h5i572jkfle0m8cn42h0o3c04pqp70ri4", + "0123340560672894abc9ade37f3623b7bd7f470gb07047hf34e3010g944i94", + "0123345067869a19188223b3bcdb083e82fdf8abag08dhiadb3jb3abdb3jia", + "01234235642342503523426476180164421823500189351801895035234264", + "012342526768759442a0675b0623a0ca8138a07581672306a0ad42de941d81", + "0123433515467487907a6b084376cdc06e869f6g086bhci0909ff787c1hc84", + "012343541678975aab01c93a82d04d0154233a82e578825a97233a23827897", + "01021345165713894aabcd4e94af453g9h2i02134a0a0113aj0aaj011d940102", + "010223400225511678393a23b4ac400240bdb4402339955195a88995517816e7", + "01123034546578970a8b4cd84d71e4d8f74ccg4c400a8h9754e4e97i9i788j8h", + "011231140563378760909a2bc9d6639a04057131327ef7g4142h1214g4ag9ac9", + "01201304015016477829a2200b1c04d747ed5029f5517gd7dhdc781c167ic77i", + "01203142536782791a010320b2821c67363d8e31e32f2053036720gb2h82i7h7", + "01213234567869a6a76b781776ac6d17a71ef1gh17f1a7iaa7j878j8a7haa6ac", + "0123345267138923a1521b3c857152cd2334e4341b1c9f6gch7c677c3c344icj", + "012340567418450112095ab5405a188a5a8c18bd6e56b51223bf74g7gbbf12hg", + "0123451670018770166287019816624945872301703a01162362162301708798", + "01234536478279027a2b1cbde2b4238bf88b37477ag5bd5d2h237i0237232h7i", + "012345564574689a8ba88c28d28ef59ag2569h9a6856622a9a498e9ig2j5f574", + "0123045670808952a20a808926b75c6d52e5453f04g02304266h7aa21i6d1a3f7a", + "012341567892a916b323c7b39250db0dbc3423503441eadbbc92a9920d23eab323", + "012345367286233608849023a9085b4586611c3623729c233661d7362372231c36", + 
"011231145674789a741474b7ab9a1cd36e31ef744c14gceh7e4e4ci7d374i74cgc0" + "g", + "0112345301677001286746671270017067124628344689281267017067014612288" + "9", + "0112345660015675561248233412600160564889127523120160567a755660017a1" + "2", + "0123434005167890a4b10140cd1d97eaf98egf0190f5gfhihaea8e789790hj5bh55" + "b", + "012345162578982a8bcde2fgfc0hhf0ije4d13jkil45j3m52c7minjo077pq5e2iri" + "k", + "0123453167389abc9c4d96ed6f4g0fhi0jk7jl7ml5n1jn47j33oaipqkd385q0g0r0" + "g", + "0102131425066778092a0b6c6d7e67066f0106020b6g2h6d6i0j6k060jjl7l6i0m0j" + "0m", + "010223455678897a09b77cdb0eb2fbgchdihcjj809db89b7hd78090k4502b2dbd47c" + "b7", + "01202345067270085259a5b7bcd02052e5fefgg74h59e5e4h353ieedie08jiiffgji" + "g7", + "012034353647342832494a4b3c344dcebfcg3c3h3i2jcg322j344b3i3k4l344m4nn1" + "3k", + "0120344565710819abb7cb329420dc08ae71b7cb8a710820ae1932b74565637194b7" + "cb", + "012034563701809abc61d9561bbc630b3ea0fafgahg663379a61ahh33eh36iah61fa" + "8a", + "0123141526700183595a2b2c6de51526f1243fdg6df15h15ihf15he5dg4e3f24262c" + "83", + "012314305646748369a2018b143023744601c830012314dca24656577de557e57ddc" + "c8", + "0123144564767837792307a04b0114011cdcade04f0g0114h8644b6830c44fc4c601" + "ic", + "012324025605576869ab1c0d5eefgf6g066h2i6j02016k2i0l0mek066b0d6ngo6g56" + "68", + "0123241456767890014556a91476453b900182233b82cd78bec79078cd823b233b82" + "85", + "012342251671577889578063013ab416ac63d0ed4201b425163ad00156d06316633a" + "ed", + "0102034567899a9b1cdefghi1jjk9el6mk894mg5i6mn8f01mohf4m07pqik5q5k4r45" + "ps4r", + "01201342015420013613366701855420422054853601130120425498855442982001" + "1336", + "012302245006075859585a9b9c8de9900f2geg5e588h0h240i50j48k588k0l505m5n" + "0o5a", + "012314356726233078496ab0c1678dec788f081814c4e6262330g4h68i6718h68i14" + "jhg4", + "012314456578794aab45659a79c7dcd234aef114g0343h4a01cg34144a010ig079aj" + "9a79", + "0123214135365768791335a5b20c01d323a77ef635ca575657f67egaha1h7ia7aj21" + "b21h", + "0123456474454883943a312b231c313aad1ce22a5a45afgbg25a4h45adi6aj5aaj64" + "4564", + "01234567869ab7bc67d8864202e0b767f65fb923a5b7gf14fhbe5fa53i2302e0beb7" + "fhhj", + "010213453567622482913a5b35135c9135d39e1fg013dh4d2124d3dh099e1f5c5ijd" + "d5jd5i", + "0102234506478729a90a0bb42c2d2eef7f47ag2h0iae02ej0iekalhm2nhn5nhmaeek" + "bk2dal", + "0112324154126741899a2a93ab8bb7ab12412ac6abb77db7ab2ae4124112e42aabb7" + "6768c6", + "011232454626748087092a7b6cd80187243274166e466f01g27b5b45h326326fg2i0" + "099jh3", + "01213453566227423430014892a0ab538cdaedab1ffgd3344101282153h50f018i8j" + "27188j", + "012304562789ab8cdefegfhgh8i0ij0h8kilmlde0nmo6ogph5h2h8h2qrd7aq0sm205" + "qcs2at", + "0123200456712801291abc0d4e717f7g277g50252h0dij0k0l0m4m201j010k0n1j4e" + "0c0noc", + "012324025617899a7a17010b9c8d8ebf0g8h8ij686088k0l088h9h2m9c0g0n0b0502" + "2m0nbf", + "0123245006576879a323a35a572b6c56576d0e5f7g570h5ii0566j56622kik5l5672" + "246c5f", + "012334405667189a0b3c29d23456cedcfghiiddccee57jh99aa040747j23gj01hkhj" + "gjfg1f", + "01233454063475895291a813892034ba6c0691896c20cd52a87e5475c86c06522006" + "6c52ba", + "0123454657689216a068ba8c01516816a001231668458c347d23455745ce342c232c" + "347d45", + "010231402516744668598ab75c250274400225597416d93193311668ed8a1631d9ba" + "93d93116", + "010234032567829325a5a47bc43dcefgghi6j534kfl56ci1j5mbcniopeq78rq87bij" + "kocsmskm", + "011234564785079ab9c06cde9afgaf9ag8b9bhb747i44707jiik56c001kd121i016c" + "c0016c1i", + "01230451677834894834ab268ac6cd67783eef8a34b5g0043423chhiieef519jc151" + "9bb59b9j", + "01230456175839abcd0be83afa7g0d0hh23i58cd0156833aab0b0117586j56586j83" + 
"3aki9k39", + "01232456407368159a566bbc235d568edfa6cg5d56288eeb6ba69abhbcci8e732j28" + "8e0d2jk9", + "01233145262728929a7b2c929d3e3fdg232h92944i273e2j9k944l9m9n949d4l9445" + "94922jnf", + "0123456478895a3b94459c7c785a944d0dce9c944d0f0b17egce7c1701haa2230b3b" + "0b23ha0i", + "0102134501611708196a060217ab2c1d272c0e0f6a01061g1d2h6i022h1j6k011j0e" + "61ld4d1461", + "010230404563278902a9bc8d89393040e427f6018gcabca97h5i810127638g403002" + "274045e45i", + "0112343556781970011a78b7cb8378dc70b7cb3e83788f01703578g07034b7g0gh83" + "3i3578b7cb", + "01203445611301077890a38b1334ca38a3dade07f08ga8h48i2d2034f16106h42djf" + "f1388i13f1", + "0123244526271891a92a2445b001cadefe8f18fgb0012ahi6h8f230ijcca01188kk3" + "232aij9118", + "0123340350678192abc1ca01c4cdc178e881014f23eg3e232hi5e8bheg50hj34gk23" + "eg3eeg34gk", + "01234254603623421760016017783660238917014217545a78546042362342546017" + "5a01176036", + "01234564574589a323b36b96acdeadf289gh90a3238gib89a3b36b96908ga3dehjgh" + "egdeada3ib", + "0112343506036789383a0bbc0d063e3a3f03011ggeah3abi0b06033j6k0l122m3m03" + "671nbi121n0o", + "012324567889a6bc8556da9ec5065685fg7838hd4i899e38eba33ja623daa6k2gkfg" + "0f06a3244ik2", + "0123405464781891a90a4042bc185d547e78187eef0b405d91187g7edh7i78180bhj" + "5d545ddhhj7g", + "01234546784824297a6b4c2d0edfgh5i4jd101kg1l9ej1mg4j7n46no7hn8apmq0rm0" + "247aapmqqs6b", + "0123456758459a2b5cd8be8f2g27a08b585chc3h232g67218f5cfi2758f0j6672101" + "f08f585c9aik", + "011230014312546530016743542889129a2801658930120143304354126528671201" + "43300112432889", + "0112340305067837395a053b3c011d0e3f033g9h0i0j5k0l1m0jmhgnjo0j03399h01" + "1d0lmh0503055k", + "0120034546782092abb5c520dee7788c2d0341203f4g46fh03202did6i46454g3fhj" + "fh3fk3deid1kde", + "012131045267809a01793b3121744cbdef3g3121ah3b3152ij21k5fk524i747921h8" + "9a79744ik5ij31", + "01230405200627890a7b4c202d0e2f2g277h2004274i207b0j2k2g044lgl2g7m2720" + "04287m0n20288o", + "012345067628971aab578cdee3355fdeg9970h01232806id3j35fk5f0d35fk232835" + "id573j0ja88cci", + "01023134356067281902608ab54c3435d0ebb5ebd52fg628603h310119023ijhkj4c" + "3101313hjhkj7k60", + "01023431516576466894a576b70c9dde6f01cg51hba546i20bhb016f4b0ccgjf020k" + "0ccgfa949dgdcg0b", + "01123425016789a0b75cd104c8e212d1fd6f6g2bd167255chi6g4h6jif6k67122bb7" + "676kk33404d1fd9b", + "0123045678795a5b1c7ded9fghidgjghkied4l597m9m4n4lon2p6pqa8o7pon5a5jpc" + "5r50594i4n4ijs5j", + "0123245678917aa001786bbac9d4914923e3fef5566bba4gd47h4i7173e3gj7hfe3d" + "91e3d4fe4g494i3d", + "010213010425676879676a060b6c6d06be0f0g020h6i060f0b2j1k026l6a2m016n06" + "1k027e67060b7o796n", + "0102342352654748970abcdefg7he66h7h47g9e6028f974854cdi5deib8f52i5jibc" + "544754i5jikj1k97g9", + "0120230405062738232920042aab3c2d0edf202g05202aab230h2i203j23dk5l0e0m" + "055n20055l2d20dk0m", + "012034355626178901a32bcad85de4f535gedehdfia35djg1i34f5a3cageikfi24ik" + "e4d8degejg9j202420", + "012343540506076869060a0b1c022def688f6806022gghdh012i026j0b061k010206" + "6jjidl2m2d2n1k2e2m", + "0123452627787980ab780caded3efg2327eh807i4b7j7a3e3keled8mdbedfn3e3opf" + "oqersfo5453k8fpf3p", + "0102034252647502528742967552a8877564a84b4252c5bdebcfbceb0252gehiehi9" + "96643j030252c5gejgkc", + "0123142536200758190a2320255b3c230d2e202e0d0f01192gh9250i2j1k5l25200m" + "2n5l0f0o2g0119h90o1k", + "012345564714478769abb5c827d8efbg8745h00114ia47fc2jc88727875dd88747b5" + "2jdk5ddkb5ab3iiab556", + "01234567879aabcd5ec4f3ghgf23ij465k50lbjml04542nmi2fogf5e23gjci5mjmgf" + "5pfo8jcajqcrc88s89cr", + "0123456789a8abc75de7f15gfh4907841d2ii9a24c8ja8ak2f9lfha22ma28n86ao9p" + "aqgo8jfp4045ak869lfr", + 
"011232045056075008595a505b1c015dae1f5a500g5h5iaj5a011fif5i0kblam5a5b" + "50bl0k5h0n50bo5bbo500n", + "01230405560748591a5b5c05070d5e595f5g0504022hhb5b2i5f0dj90k5l4m595l05" + "en5eeo050k01en04014m1a", + "012314561728498a3bcdd92efg055223h3igichj4kfgig565f0hil012e145m0n051i" + "ioic8dcd01ic058d5p5f5p", + "012320042506370829232a9b292c20bd9e0f299e3g9h0i5j250k5j4l2m20230n040n" + "209bbd4d04202m3g9h4lmk", + "01234567524558944ab9cdef944558f538b9gaha8iijga58f5gkj08hga4a8i948hef" + "dbb9944ag6677efchagaha1h", + "01123415678939a5abcacdecefgahi1i15a5gahjecakcaeclelmlehlakin6e1iin6o" + "e001ecpdcdecle8l801q4h4634", + "012001314536785739a28bcdbdbe8145fg782031df018b785745399hhijkk4455778" + "bd8b81bedfec9h31399h310120", + "012032145601207083965a56b153c88332c8011996261de996262070f7015g70f7hf" + "id7j01701d5a5k0k707jb1kbde", + "01201301045165170187174951047a17ab5cdcdeeff220017adghif27i499j49khdc" + "deeff22004499jichidc87ichikh", + "012342532342647687893aabac8dbeabfagcac203a428hig20236442gc9f201j0120" + "235hk53aab895hchgcigeibeab9f", + "012134526706804996abb8c8c7cdc19e21f2cgfh6f96fi9ec767c7c121f2eacdfh06" + "d3346fcdhjfhc8cdd33eea6f3406hj", + "01231425167289abc61de8af018gh0077ei86i7je8c66khlh0017e8m1n01h0haacc6" + "6bboo323p2p96kpq2re87e727c8m2r", + "01123241561273689ab03ccda54e01f2g7eh12fi32f24112b041i99a32fi4efjf2k4" + "73dkg756i93ca5cddkk432f2fi4eehfj", + "0123451206074819ab5c4deffg74127h3i7445485c1j4k23c6123i232l061m0d0n9n" + "fo1f1beflp124d4e1q4rab3s1fjdef1q", + "012134526276896ab1acd55276e66a6221bfb1766221b1b3344gghh8899iij34e689" + "bfb3b1017k07j0344gghh8899iijj0017k", + "012134567832397aabb0c6ad018e343fab0gah7ib0ij7a83hkh4blbman34213o78ab" + "0pbmmqbmb0hkbc8ras7a3f788rckanbc7i", + "0123045067895aab5c0d6edf0g028f0df73ha33hiai88j5agkkl5cimkn8ogpon8oq4" + "gk8fkn2c02040g2r020ddf0g5i500s505i", + "01234156789a1b8cbd500e78f37gh4di0223f35f5j019k5l1m1bbd7ddi411mbk9kbn" + "1bh4h6299op97p7g4qer504101501b4qbs", + "0112343552626787904a186bcdefdee8cdg55283hg346i673501j31252g5fk52e801" + "12iccddeefk990011252g5hgah4a34j37je8", + "012340525657899a0bcdc09d1e9f9a9g01fhfi9f54d49dgj4kkldmlnlolp9gln9dfq" + "r8dm1o9ffq891er8rsr84kk3235285r8rs9g4t", + "0123454614278289900aabcb27defghfhif12j9kkl1m0nnd3d232gfg2j01909i0n9o" + "0acp82d5dq2c8289crab9snd822c82899knt9s", + "01234567849a1a7b9cd267e1f6389g5f67hi01j0ik019a1a2301j0cd0h450138841a" + "9cbj9a1a010hj0hi9c0h38j0bjd27bcdd267f6", + "01201341567617016869ab76170cded4416fe54g56hi1hjdd44k41el4m4n4m1hno4n" + "papqiqhi0h202r4r4nnoloele5asdeatpteae5jd", + "012034453136012072813431902a452001818b90cbdef872b2aghf2aag81b2gicbjc" + "5j45343181f8k7cdcbag722aag36b2gicbcdde72", + "012030435467282030799a9bcdef4e499a1a010c7g06302h7ifj67klef43mn3n434e" + "edcd06o57g2k52206p062q2krlkl2k2006fs2hhd6p", + "012345462775821597a00127b997753c454dde1fegghhifj82j882151f4d454d2327" + "de15eggh4d45754dde4d150115454ddeegghhi7527", + "01234550670894ab9c2daefgahhii7ajhfh04694fg4k9cdl94ahem9a94n042hoh046" + "2mhi232d2mipemil4k45422q01n0erhoaeas45aeer", + "01234567891ab4ab7c96cd89defg2845e08967011ae05fhf7cih96cdde2g455f6789" + "fg2889967ccddee00145hf23ih89ji28fg2gfg2889", + "01234526789a5bc50c7dce0f97gh9h92i25i0c948j92bkc5al5bc5e8cm9a92n00ccm" + "m301ioal0p8q2r92n9n0n992i2io8o787ds18j2r0t0p", + "01234561728950a5bab84cbd0edfg5a445500161c6baa5g57hbdb8346i08jk5jcl3m" + "4c3n6oapc6a40e4qbrbaa4454c34455jjk8k8scl3nb8", + "01022304501637859a1b5c019dd050858e5f85egehdiej8ed2gkl823mn02ob2m011b" + "ob5olal96p023q6r163s01hnehd0022mdi5fht6regehej", + 
"011232434526078969a81bcde10f011gah0i6j8k5a01al12emfn0f01e1ae0obpbqhq" + "pr2c1be13e3201bp261b12cd2c2668psah010ihpahptal", + "010203345067899a5bcd29ef9g5h9iji4k1l4l011l4l4kjk3jbd5b5mn5jo5m022pmp" + "5mbdf03jfqef29rhecr35snen55hrhr3e6netn5se8ne67e8", + "0112034356372860898abcdeecbc6b56d17fdgh56idj0k607l6l56h5h443dmd1dm4m" + "43126i7f3n017o1p8fjqdj4k407f138f7olo3nnrd1lsdjjq", + "0123145067789ab2cd01ef1f01964g6h65c0bedi2jb2ab966707c0ace4be4k6l1450" + "cm8nc08o78678pcq7r505701b582bsc0b282018n7rcd144g", + "0123242325656778890abc3d8d4e5b1fgfghbgb7ghbg0ihjk0011fgfbg5bh92l6mnk" + "0i8o892383h9hp892l3qh9hj897r78838o3s836778833q7r", + "0123345652233778899ab0cd68eb6f8991bggheiejb8k4jl344mnleieobgejjlnl3n" + "3ppdhqk48k684r34phk44r3p37hq7bks8k68kseo5o56677b", + "012343567282729a23b7ac7223d94deb437fg59a72fh03ig4j4323d901hk4d60d903" + "ac436072g5ci56b7604j037f23fh7223hk03607facb756g5", + "0123455678484595a9abb29cdefe9fa9ab4ghih0044595cjf7kelhfe7m2df79f3n9i" + "befe9f9ccj23bohp3nb2hqhpbq4r7s2dd72d487848202d4r", + "0123456728093a9bc02d958e2823cf2dg42hi0ij4k3ldmnodc8edm9p6ccg6qg4idc0" + "cgdcr22didgog4r66n2hcg4sg4cg6c23r2trr66cc04s233l", + "0112013234567897a93b1cdbae5e05f0199g12h3i67jhk7h5l2m7jdndojd7j571232" + "h3ahae5e053p2qh39h19dn5r57782m8m7857565s3plt1c5llt", + "012334156735879a3b8c34d4de3fg8dhd94ih1cj929k6k8l1mdhh1010nln8l8ccjej" + "a0gdd4o6672paq1r15a2233515d9343f8a4s1raqd48t878td9", + "0123455653300778981a1bb89839303ccde0fd01gh5gij075k5dfdlfl223m4ln2oip" + "ficilf1j4lbqlb4l5rl2bs1b1jijfilfbq3053454ll22oeo07", + "01121345671859ab069c7db97a7e67644ffg4hih2i0106a2hj459k7lab2man4f1o12" + "45bpb959454hhqiq2iab1o679kirdp7a4f2m4h644hhjihhsihit", + "01123456577008292a01305b3c8de3fecgfhe55a0idj57dkhl08h1124m34hle33nao" + "5b3c30cg2a122k8d08dpaoaqdj1r7sh1h72afefhh1122a7s6gaq", + "01231341546718904a41bccdefecbc3b2g953hij13010k36234a366lgjcm06ln9k90" + "2g2obc6l13ap3b95232q4e1316234i4aar4e4als6l36232qctcm", + "0123451607893ab4cd010e357f2004eg0e077fhf3h35581igj01ckc101044l04eg0e" + "044558bmn9b4bop223ck45gq5r5889n93n23p2pbb45lls8g585l", + "012345466728591aab14ac451acdedefag14354546727e723hfi1a4j0kfl46km4n7o" + "23k6acef67jp724jbp7e011a4601ef5qfr455qef46677eefflk6", + "0123456758393a4b535cdcede47f71ag7f7h9739580ibibjjk8k58533ldllmin5cl0" + "0o5p7hb2b69qbr399q01bi4b3l5s450t4bl05s2l3lbj0t3ab2br", + "01122324565267482419a08b12266ccdef0e01fg9hi6efaj0e1k01cl486c12244mkm" + "0e1n26fo8p12a0cl19521201a024cqer5s52e548cf8pcqfoet2452", + "01231452316789a8a49b23bc1475de52f6678931a83g2389efa414h0013123idgjak" + "ci9bbcci3g5223dea475idci14bcf6677552233114899bbcci3ggj", + "012324145617809a3bc414177d23edbf240114acc41401fg17802hi93b23g5680180" + "bf012414fg17bf7j170180dkkii99aacc424233bbffgg55668800117", + "0123456738591a9b2c979d7e9723fe9g7e7001hi3ij3ej1kjlgm6n0o7e7p9gej6e7e" + "706nej0167fqhr2seh1tf6233i2326677eeh702s0o0197fq70979g01", + "0123341536789aba4cd3de14343f0d36g71hgi4g2j34dkd31ld9b823m09n78d33414" + "01eoded36p46m04g36d3d9b9dk7q7r789sbtb96pb878g77rgi4g141lbt", + "01233453621778126296905a8bcdce8f7g2h1e1ijdek7812233llccmemcd2h62cf8f" + "3n9o900553236201ce5pekeq1e3n8b8r17is1i177geq48tg4g485g8r5p", + "0123421256574819741ab86cbde9fef001951g7h56e79ifeej4k0l7h19f0lm424n01" + "0l6blo95mc015plmlqbrmc1g120l01199i6b6c0l6bbr12e774482s42e7", + "012345675847894a422b455c42211ddefe3fg3hghi581j8k456k7l0mj8g20121g2hg" + "hi6n0oopjc1j010oopap4a457e6736131d6q8r368k0ssc0mg3gm0m016n", + "011231043563676896900a01bccded2e1bbccdedef312e1gch12ei2ee7ei0j1bk27l" + "011b2m0jnkmobc2mbp31cqcd67ed8r6867k22mmo7oe7edcd1b31cqnkns8r", + 
"0123145460017893a0761b0cd7efa0ghigji01bkclm4gfmbcn0cgo7c017pbklq0eim" + "54a5a8787pi2climbr1b01br0e0cbsmbm4a4m4imi22393ata0nfef0e0cbs", + "012320451637238352792a017bcd5ef8823g20hf37h40if3fjhfh44cc8823g20cd23" + "gk7l3g8mgkcn23525obp1qcb5r7s7b4501c8825r8m7scn1qcb4cntcbbp20", + "0123345405066787890aab9cbd8ecb9c9219f1bg06hi3i239j89fk8l922h89926mf2" + "23673423f2fn0n0aao7o87899jhp5qr0h505hpbphph505bgsf010af1sfsr", + "0123345647892a3b2334614c68566d2eaf2a2312af616gh51i2efj348k0a6156af0a" + "lmfj5n0168nonpno8qob6dhr3bh534nlgmno4sobnl343b344cob5nnoob5n", + "01234556789abcc7d91e8f388f3gehh4789agbida20f238f45d9jieh01389aa21eeh" + "0kd901230fh49a0kidd9ideh1e018f786jjiid9aa23ggbbcc70f011eehh4", + "012324143560470849011a9b01cd8d82e2ef6f60015gbh49ci5j2k1l24499bmb8mbi" + "cn4314439oe3bp2kbi1qao3ce3dr148s3543cibi49141qqtst8s8ddr355gcg", + "012324453667489abc9b239adaedefbc9g3hi290f950iji2fif99a5k242345ala2da" + "d4dm5n450o36232445ph673h906q36233hphip0r50a2daeddm2s6q4524459g", + "01233425467889ab8334cdce4fgh6c8g23782bbi34236j5k25cl3m23462nbo70788g" + "gaab2b3m4g012nga6c46ghap1qabbr2b341s1ttj2334466jtj1t2b5k6cbocd", + "0123344565768939a7bc7b34de9f904g39h33i9fhjh37223344g0k8l90h339900b7b" + "a7hmgnhj6gho8m7dbph3hq34gr7ba7hm8ms8saa7766g4g34hogricbp7d3iic", + "012345611778693436a72a6b1c7dce2fcgah61gi23dj1k6l178m34361c8ignlogi8i" + "4p348qgrcg5j4p7ddj5j851cgn36696l3623366117a74s451c6lce854s8q2f", + "012345674821499ab936b7bc236de21b8f48499gb99h01eij449ekeili9amaen1oje" + "eipi8p483il64l525q0q011ooamaj4en0rs0sjj44ll6363i6t676t1b011bb7", + "0102034516177829ab0ac13d7e57035f7g177ghijigj7g573djk0a40782lm45n4502" + "aj0a40mcc88hhi5o8pjqhrgjhi572s0201187ggjjihi57hrctc85oct8pab2ljk", + "010234256076789ab3ca5d02eb603476247f25g602605h2534b37624gi8j6k347824" + "765l79b325028m780nno60pee7bqebo878e7peg6600ngi8jbqor5lnsotnootns", + "01203456723528693a8b5c560dcdce2f6g355c3hi135ij8k20727l5c34lm7l013n72" + "288o2p7372cq28562001cd0d8r28i1725i738o208r5s5601288tij28200dcdcq", + "012304356748902ab3575cdb0435462335ef5f35136g3h04ijjklk0l04466ma62a1n" + "23132o3h2ab30e48a82ai29iijhp3hb3iqirbrdbd9is900e9i90et0e909ietis", + "012324563623727849abc9bde9fgfh8e9iej9aakab2b8le9ak72jf2maniop79ieq78" + "4ee99aab2b8efgahioc92mcrej4s4eanj1ftjfej80j178ft8e4e4scscrp78580", + "012340565787798a8b87bc7dce9f24797bbg577bbcch0h0iij6j388b5k45hlfmcecn" + "24bc0h239f5ocpfq570iij6j565kkr9r79bggshs0hcn9f79bg7b3t3b3t7dbggs", + "012345675849a97445587b67c6d6c60ce01ac21f017ghi7b6j0ce049h2c2ck5l2mc2" + "a9hicnc6hokpbq7bbh2oc2c6dr74d6spdr7b2mkp45c65tck74c6677g6j5l49kp", + "012134455657892a1bbc6d42be21bc0f2gc8eheihjbkbeglcm0c45eh8n1o5p2q422g" + "bkcm56bepi300f1bc8bc1obec8gl5p2qpi45eiehgr8gc8bcbehsehbebcc88ggr6d", + "01234225126789a96a06011bcdedcfghd47i674j1889a96a42edebbc4jdkclmjbn4o" + "121p4j424o2qg042mjmr4jbcfm6s67mt1bcddkmj4j42mtebfi7i6706g0gh122q6s", + "012345166718896a9b161c1889424def9f393g239hi2djk4l09f2301144ddeef9f39" + "23i22m6niol09bip1c18i2cq1c16e618ir14es8gkd2di2ittg1814de2diokodj2m", + "0123456357256896a9a223346b4c576d8e5f9668634525237g7h5f6d4i717bi0bj63" + "6b2kbl633m340n0171o44ii07b0piqbl25577g4i45254iiqoroso4i0iqrqiq4ii0", + "011234255603725801698a9bbc56586d1e563fbg8h256958ai8a3j0301123j1kkl9m" + "9n56ao6958259m569b25122569568a5825729b1k7p8a2qr77212bgas258a58252q8" + "a", + "012034322565474829aba38c322de7ef6g463432255h5ggeai474j7kela3em6nofef" + "gee74746bg7kofge4jpqgp48or475ggp7q47488c9c295ss1so32emotai5s20255ss" + "1", + "012324252312010676893a7b375cd3809e9f7b257g06371h12ai250j9223fk377g7l" + 
"3a929m92377l6l06011hdhjno8opdpd323920q806r069f0jjn0qns6rnt5n899e3an" + "s", + "01234564371879a29623375b45cb79bd64eccb8fbdg05bgdbd96450179ghg037ec64" + "ia5j5b23a2gdbd015b5jjk23377945965j5bbdgdg0014564793723a2iafi1801gdb" + "d", + "01234567783779678a1bcd5e6f30ad6gah78676ifjik781l678m24233070675c0n01" + "1ofp6f805q8m1l6i2rho8ahs24gh586ght8a5egh456g6fikqk5q45242r0nnpfp6fh" + "s", + "01201343015220657378944394a97b73139cde1fg47b73bd7b73430173chg4i67b94" + "13205265i6bd7b43jgg4jg73527bbdde2001eibd7b734394a9kafk1f0120529ca99c" + "ch", + "0123425046721889ab011c4d23e2e4a40a01188f3gah8i1jek2342a40a3lml2nao38" + "3lmldmdpop18cq1cmr42ml1e4d383lmldm4d727e3gcq7s8imtdm4d7ee44ddm7emt57" + "7s", + "01234567489457ab0ba95c4d929445e51ef1fgehijej1eg1gk4f2lfm16g1gngkko16" + "lp4ffg2g2llp8pki0145ijejqoer1eg1010fgner2ggk2giski23fmit4d92koqo2q92" + "is", + "01121324056787495a5b2cd2efbgdh12d2ai5jjk3k131fefe4499aa71l122c2667a7" + "5a5bbm1ncgaogp1224bggq9q4924120105a78r87a75asee449055bbggq9q49e4se5j" + "gtgp", + "01123243456748945ab5b67c8d7867ae5abfgbg9ae90a601hi8dji4khi457cajjla6" + "b6b5946m8n0hmoaj0p5qa648ajjl8nhijilrjl5aajji5a45hi4k0hlsjlhijijllshi" + "0h90", + "0121034256017589a121bacb0dec0138f2a1bacb0301213803a1bacbecgehg9h8938" + "03if0d4j2164f256ec21cba7bacb4j75a7jkec75644j645675a7a121ifgebaeccbec" + "geba", + "01203456274879ab61bc375dabb5ce56012042c4bc0f7gfh2i20cj2i0fgk5l427gbc" + "jma33n20fo0ffh42abb5jpbc27fq20cj0fcrrsc4fqcrjtrsjp5lcjgjcjc442273420" + "gk4220", + "012314355672480901ab23bc3d09da3eaf6g14177h722335562i6j7kkl233d72355m" + "dn7h3d35daom7k725m3dap4o9qda2372ab3d7k35566gdn6rda3ddabsabda56353dda" + "566raf", + "0123341235236227070181985a6b35238c5dce98fc9612cgh0ijbj5k7l6m072n7lh0" + "8o1p127f816q277ffr8ifsfk7f3507hth9ht0t077ffc8c968o9823355kfkcs8c9896" + "6b122n1p", + "0123452678699ab9988ccd2d12c5beb901df982dcghgdihj2k6l9a69mh9m8c787626" + "2dbe8412bnho78mh0pb91b010p7p98bnbqb9mrho9m692669b91bbq2kcscgcskt842d" + "2kkt2ddf", + "0123454012267112010869262a69269b5c6926ad010efeaf2a0g0eeh450g9i0e405j" + "kl5k5j0eem0e6945klgn6lkl5k0ggnjn5j45o4o771699p0e69fe26fqaf2a1rfsad12" + "26699p1r", + "0112342526768798a96b2cd40e019d1ffg7hf39icj34d4dkkl9d26768798a9dk9d8m" + "7nfoep87980eklkqdk9dkra9dk12010eeaep262c9d8m1fa92a126s26atdk2af3kqa9" + "2ccjojfof3", + "0120343546737897a72a209bac9dbe2a4f9bbegd1h9d20346i01jf1j4609gk6l3497" + "5m1n35011n5m5g463446359d09202aa55ggo6pdq5g344634355g6l3rgdds359t9d5g" + "gd35dsgk3r", + "0123244550265782249a0b501c456d5e260124821f8ggh4i452j246k5745a49a988g" + "ghhl7man82577mgo26ghmp45gq8g57mr7mhlmrgo576dms507l822445a47m4i017l2j" + "hl5e1fanms", + "01203456137548492a3b1c1d01206be01caf13ghcg1cgi13cj2kcg34013b1cgl01lm" + "cge0nen76b4olpgl561c0q5r75347220011ccg132a20015se01cgl7201lmcjet1375" + "5se072340q2k4o", + "0123455667897a60249b5c670d8e3e30605645dfagd989232h5i247aejake6ej9l67" + "e67a8m5n8ee65645mo8pdq246r677se60d898ee6600ddt9l8p8m89d9aq7aaqdqd989" + "8mmoco5c566r7s", + "01234054677879a40b06ac678d7d8e58541f40fg8h584i1j0140k554a4kl5m1f0154" + "8hnk405o4i1f01fp1f0154400154fq1fa6nankkllrrj1jl26s5mlrltrj1jklnkncnk" + "kllrrj1j0106a66s", + "0123453667789ab2909a231cad3e9a363101909aabbfb2fg0h6i362j4fbfb2237kbl" + "3e0167b2bmmn144ofpbfadabbmmnqn3623blfp7k36ri31ds6i36317t14addr01676i" + "ri0hb27tabb2drds", + "0123450678299a6b7c3c1d34ce3c2329cf3c23g20117069h3i7c3j3423g2gkcl2940" + "3i3ckmk4403423299aab6b6nno0p3c4006406nnq6n6bab9r9a34jsab3jjs349r6b40" + "6n06nt6n060178171dnt", + "0112230140564708791ab49cdb47794eb4fdfg9h1240db4efd47b401gidb40jkb447" + 
"799ldm4701gn2o12fggj1adb5f1pbq8a01mkb46b56406rgnb447ds6b7tdb79b4ds2o" + "011p4779dm9hb4dbdmmkbq", + "011231244546272839a97a272446b6cbdce07f3g5h46011207fi7f070131399jak1l" + "3m39gn9o39a9ak5k4546b6cbcpdc5h3m0q3146ak39a99r3gak31ed01125s45465s24" + "b6ctcbb64624120qeqedct", + "01231450670896a89b3ccd259e593f255059g76796595001141hbi14aj08ak0l8m08" + "01313cgn4g230oa8080114li4p9b968qnrgnns4gg7gn4c4gnt1hgnnrcd1401144gg7" + "67969bbili140801144p8m", + "01234516789aa770b98c9aa778cd8cde0170b4b97f5g455hb4hi78cddj9ab9a75g1k" + "42cl0mb49a8c5h5n7870clb9a78cdocd01do8c8p1q7870a70m8p9aa701b9b445705h" + "422r01a7429ab4b9b442239a", + "01201345622782980401203a1b9c13cd3e01f5g6454h20cf8iej828kclkmf5206204" + "4h4520133e828k0198hn4h134504eof5hp45cff5454h04hq200r4h0113623s6t823e" + "201382620120g9988213043eeo4h", + "0123201401566748146269200a625620b29c5d014eaffg14690agh200i62fj5601ak" + "0a4l2001af62141m4e0afg20b20a56699cncdno5gpafdqfj0120625do5fr56ds5d56" + "62dqbt20011m0ab220b20aafakfr", + "0123404516017840097a9b0c098440ad8e1ffg9b848h9i0140jkk484787ablkm1fk4" + "ad0940k49bfn0978j7jkk4bo9ib2014084dpad7a789b2q094084fr9b0c1fb2as4001" + "40848e7a78as84409b099b408478", + "012034256307204816921a6bac16011d20013e635f3g9hhi1692202j019h92hk6316" + "1a2034169hhl25m04n631601ao34201a63op01aoqm1amr48qmosm092q9aoot201601" + "1a20259h5f922063mr01ao3e1a012092", + "012324450678925ab91b2cbd1efg3g2392b937ehdi6j697kal2392249m45926j37an" + "690106691e5a244oep2324bq924524b91b1eehbd7ref929mb92392b91b1eeffg5s24" + "3745247r5t452324b99224b9bd455sdi", + "012334561701896abc5dae18566aefg56223ae7h0g6iej01177h347k4l3m56g56256" + "gn230gg5eo3m56016aae0g1817626i6a2p01348q6223183417g556pr62g59c7k4s6a" + "at4l89gn7b170gg56a566ag50g01177b", + "01234560761438790a9b5c604501761d5e79bfd2609b7601791d609g0123d2bhhi1d" + "7614794560gj5kdl019g6m602n76799b0160143obp1q01601d76bh79grd2bp4s141d" + "236045d2011d149ggr23604s1q76796m3t", + "01123405677089abc60d67dac8c67e7089af011g67050d9h700183gic8833j4k0dgl" + "c66789c889839m6ngo7ec6c8671g70dpda0d017067aqc6c8899rosdpdago830d3483" + "c81g6770679m01c6701ggo67700dosdac88334", + "01230435167867013916a08b786ca201a0cd2eef01dg16676cch8i1j1678a2676k7l" + "676c8mcdnodp2eeq78018ba2a001pra2166cdpcden01a023noa2a0dpps01dp162e6c" + "a26701a0a2cddp6c6kprpt0116cddpptcd6cch16", + "0123140105607658129a010512013b8c231258018d3e6076f601237g12ah010i60f6" + "fj053b588k2l6m60761201600512f6nfn96m5o2l2pnf12609qf6nfrnr760580160f6" + "n9nf12f62p60n9059sn9nf60f6ftn99aahth9s60", + "01123045260114125708934530935a9b2cbd0114ce1f933001939bdgbd30934h9b7i" + "jk1430bl011230lm452c14djjk5712bd939b7nco93cp3045011430932cblcp9b9345" + "5q45lrbllr309b011f93143093010814459b5q7q7n", + "0120130124356758493a9b9c0d3e8f203a24gh13a63a136ga63549gh9i01gj3a6g4k" + "a6gj133a200113244920al244kkm3a35al4k588n6o01352024201301351p20582401" + "4k8q20245rks4kksstct9c2420a63501133aa6356758", + "012324154647789abcc20b24477d3e3fbcdgc223chfi5j01150bbc3ffic224010b47" + "015k1519flfmbc7d9ndo2p2447c2247dbq0b194701fl0bbcdgc21923dr7d0b01479s" + "19010b5kbcc22447nt7d9n19bcdr010bbc3e01199nnt", + "01234506789a5b6c807de40fcg066c78h7ciijhef280780fe9h79k800678fl804m2n" + "06heh76c78e4f28oheci06ipe44q800f80e4h72rflhe78f280e40ff280h7782r8045" + "066s6cci06it80e4heci78h77880066che6se4ciip4m", + "012345365768891ab223c43dcb304e2fc4b2233645680g8hcb3001c4b21i5j4e23bk" + "6limen30b24501jocbc4455j1i8p233d36cbb22330013668cb8p30b24e45iqc4cb4e" + "b2231a1ib2ercbc45s45c4cbb2233001361i30iq360g5s", + "011230456708733069678abc08733d7e94308f0873aggh8aag6901679b304i690jbk" + 
"gl1m08mn8aao011m73mp3001941m67733008cq69678a7394ag309b6745gr01699bbc" + "677330678a4scq08016994698a4s1m67mt1m01733001737e1m", + "012342356758239a2b79206c7d1e013fegeh67dic423359j795k6c671e42c47d2042" + "dl016c201mc46742en017d6c2bc41e01en236742do4p237d20355q6cdo67cr42c46c" + "203f3567429j0s200s42c423426cotdo677ddo676c23c44p355k", + "012345674078099abccddeb440dfag0996fhai679abcb47j40jk3h09bcgl966mcdag" + "67b47jbcc2b4404509bcjkjn7896b409go7j40677j09b4bc9aagdf0940cdb4bcb4df" + "40gp09c29a96jq2rag9sc2gp7j679a9sbcai9609b4400996b4677jbcjnc2", + "011203344567589a2b3c34455d01de6f031g013412034h3c01gi03458j1g588j3445" + "865d035801gkd93403863445586l8658121g2m125d2n011g3403de6leagkeo34mp12" + "45de5ddeoqeoer0134de033412455dos01de5teo5doq34de2mer125dmp010334", + "012032341546010718698a5b5c15ad0118208e2f32341g46ch8a6iaj5cck15012032" + "5c1534jl180120328acmaj8e341gjn4o34pq325c018aor18018a20aj32js0734466t" + "8a15tq341801328a2001323415aj4o8a5c1801op20324o3432204o01188aajjs", + "012342356789ab87cdef75fg1f1h5gd2d88iij4klmnopqr00hshndtpmdm8u8vwfxfy" + "z1ABjxCiwCw7bDDEE6F6dGn4s3He83pI8icdcmm8vJhfKyLKLM0NGLB0OmrPQMRg4HOA" + "SdABfyfg1hcTRg0Ntp3fdGU2VRhR1h1eeMLM2LU2rUrPWo23taSdOclDOlOccnn44223" + "s3JsvJbvDEbXTU6YalsYok4kuZJ6WoquvubvbXXEDElDlJuZJ6cmtqqmcmcTT44HTUU1" + "1eUsU11eUssY", + "01203456789a4b1cd11cef1ghe01i520j0kjlmjm1gjnoh9pq5hii9e9rstuuvvw6w56" + "x50x0yzyAfef300yfpB4mCCD6w76E7EFFGcuHFnInCyt34Jq3JKzKLLAAfDHBmtMmC2h" + "NOFPBmBQrll3300yzyKzxEhi4RJqsoOF34FPRSSOO8T84by1bSUQDVmDdN0dmdmDQDUQ" + "Ub4b342h200xxEhtohDVetheuva676SWXc1H1ccuuv6v6PaYdNNOEOa6d1E7dEd1y1tM" + "MYaYRExExtRSXcZMIZtMxtxERERS", + "01023456789a696774abcdeafa5ghgijiklmfn0nkoo3opqrsqhtunjuijov3vf3mw6f" + "kftpumxyzxA6x76feBC13D4D74Azz9Eg4wjeeBFGGHIttcko6gG6HJGHGj6ggcpKf3Li" + "iMikkoo221elNcO8C1PQ8PRQHeDS78G6yCPQ67g7hgrhqrqAL61Tg7C1n1jfGjFAUJJC" + "C1JVUJ1TO8yOyC7Cg7AEFAFGGjjff33DDPvTxWhosLLiXYYUUyzyXHPQrhMuHegco3g3" + "gchczxelulMuiMz9opxWsLf3lZjfeljeeljff3o3optpItpK", + }, + + partial{ + "010213_123", + "010223_013", + "01022304_12", + "01120301_03", + "01120304_03", + "01122340_01", + "01200123_12", + "01200132_01", + "01203243_01", + "01230402_13", + "01231442_03", + "01232001_13", + "01232421_03", + "01232454_13", + "01233001_12", + "01233123_02", + "01234142_02", + "01022302_012", + "01120130_013", + "01120130_023", + "01122301_023", + "01123024_124", + "01201334_024", + "01203104_012", + "01232101_012", + "01234564_025", + "01234565_034", + "01234565_134", + "0102130104_13", + "0112010301_12", + "0112013001_12", + "0112233412_03", + "0112233423_04", + "01122334_0134", + "0112340425_03", + "0120133445_01", + "0120321454_01", + "01203243_1234", + "01203432_0124", + "01212340_0123", + "01213441_0123", + "0121345365_04", + "0123120401_13", + "0123122334_03", + "0123302342_12", + "01233412_0124", + "0123401201_13", + "0123404345_12", + "0123405246_13", + "0123425451_13", + "01234356_0346", + "0102011301_012", + "0102013413_014", + "0102133413_013", + "0102322450_023", + "0102345363_146", + "0112133413_123", + "0112234035_015", + "0112345054_014", + "0120134245_134", + "0120324345_125", + "0120340142_124", + "0121013004_013", + "0121344254_014", + "0123045215_134", + "0123200123_123", + "0123202301_013", + "0123244536_036", + "0123301401_124", + "0123401201_024", + "0123404301_124", + "0123405425_123", + "0123455667_024", + "0123456324_025", + "0123456476_135", + "0102031401_0234", + "0102032402_0123", + "010203425436_12", + "0102130134_0124", + "011223345165_02", + "011230014330_12", + "0120133401_0123", + "0121340350_0135", + 
"0123030114_0134", + "0123044223_0234", + "0123200124_0124", + "0123242023_1234", + "0123242024_0124", + "0123245035_1345", + "0123403554_0234", + "0123413141_0134", + "0123413145_1245", + "0123422354_0123", + "012342450667_13", + "0123424530_0134", + "0123435013_0345", + "0123452450_0245", + "010201234534_014", + "010232456445_514", + "011201233450_012", + "011230013043_123", + "011234536768_470", + "012001130120_012", + "012003404520_231", + "012032430156_125", + "012034256324_136", + "012321013405_023", + "012330011435_012", + "012330232401_123", + "012334054534_123", + "012342055405_135", + "012342540554_123", + "0123456426_01356", + "010203011401_0234", + "010203240224_0134", + "010213014001_0123", + "011223341223_0134", + "012013012042_0124", + "01203245643736_15", + "012034130120_0124", + "012103410141_0124", + "012131403501_0135", + "012304010304_1234", + "012334022345_0345", + "012334231223_1234", + "012340344001_0134", + "012341513052_0135", + "01020345011445_135", + "01122301401201_023", + "01122334120112_023", + "01123001304330_012", + "01123001433045_012", + "01123024011201_023", + "01123440450112_123", + "01123452356335_045", + "011234563656_01345", + "01200134153220_124", + "01201301425320_013", + "01203243322032_134", + "01203432011501_024", + "01203445622334_136", + "01210342012101_023", + "01231223011412_034", + "01233042452330_123", + "012330454262_01235", + "01234015164735_125", + "01234563561517_125", + "01021345341301_0125", + "01120301122445_0123", + "01122340454001_1234", + "01123001122412_0123", + "01123453604634_1246", + "01200123202423_1234", + "0120234565768798_14", + "01203201434520_1235", + "01203234322001_0123", + "01203415567475_0137", + "01232024200120_1234", + "01232415122612_1245", + "01233004534647_1346", + "01234256467861_1357", + "01234536647898_7152", + "01234542506727_1257", + "01020131010456_02346", + "01020343566402_01246", + "0120231401205201_124", + "01202345142645_12456", + "0120344562367308_138", + "0120345362472428_714", + "01230234052301_01234", + "01230452055236_02345", + "0123243523355035_024", + "01232456401556_01246", + "01233454500156_01345", + "0123453563678798_034", + "0102010302420203_0123", + "0112010301244524_0124", + "0112300124122401_0234", + "0112345460514786_4512", + "0120032420010546_1345", + "0120130134130120_1234", + "0120340113012025_0124", + "0120345676633448_1457", + "0123434562467646_1235", + "0123454162060106_0134", + "0123456748426015_0357", + "010213340113010501_134", + "0112030112430503_01235", + "011223123415122312_025", + "0112343524366035_01345", + "0123401501252625_01235", + "0123404540130113_01234", + "0123423523563556_12456", + "0123456405450223_12356", + "010234014523342302_1245", + "011201300112241201_0123", + "011230013435303412_0245", + "011234300134122512_0134", + "012023040152366736_1346", + "012312234001544640_1235", + "012314253623016001_1245", + "012314500125502501_1345", + "012324155647789225_5093", + "012342302342454223_0134", + "0123452062473447_123567", + "0123456557805893a5_1256", + "010223400256788597_12368", + "010234567047258396_01245", + "0120345640716783_0124578", + "012340563778155662_01268", + "012342504165714898a9_013", + "012342515601513001_03456", + "012345404617784296_01246", + "01023435230102011601_1245", + "011213241201505675_012356", + "01201334252001201325_0125", + "01203245637687588745_1347", + "01203450561735515683_0264", + "01231442142305011442_0345", + "01233456154534782356_7620", + "01234056721868911821_1358", + "01234235567823757378_0248", + "0123425617807215914a_0136", + 
"01023014014556144501_01346", + "01023440025334366736_12456", + "0102344523344502230102_145", + "01023453467534533475_14567", + "01123445652367342386_03468", + "01123456677819674589_03579", + "01203214015365768765_14568", + "01203442533420677867_13467", + "01203453160501051738_01245", + "01231435600160563523_12456", + "01233042566774235685_02457", + "01234052160137655216_13567", + "012340563571083182_0124678", + "01234525647037237045_13457", + "012345536037284512_0134568", + "010234563750317898_01236789", + "0120324553642076458764_1358", + "01233445416274286709a0_0629", + "0123401201124023561264_0345", + "0123401564710178122901_1345", + "012345367884234992_01234789", + "0102344056074840488548_02457", + "0112013001241253014667_01234", + "01123101450637408445_0123456", + "0112345167870696a56396_12469", + "012003455336722672877298_134", + "0120130104133513460104_01246", + "012034156375877698a98798_014", + "0123024523261401700114_01356", + "01233004353067085635_0123478", + "0123403425167301282634_01256", + "0123423520062335573520_12346", + "0123451402012602278002_01258", + "0123453617788223404578_02345", + "0123454617890542499001_13578", + "01234546704158013170_0123457", + "0120324567894a9bba968936_1578", + "0123421456423556677856_123458", + "012342504675371801865075_1348", + "0123450673805345582873_013467", + "012345143670018098a26b8a_3215", + "012345422360578598a43b64_7135", + "011213010456455718019013_02368", + "011234231201506701686912_04568", + "01201334453401136376870198_025", + "0120134225012001562572_0234567", + "01203454657617018297a9ba3b_013", + "0123244567583680212321_0124568", + "012334522365527687657623_03457", + "0123345653271502277890_0234678", + "0123415260017451417485_0123467", + "01234353436062077879a4_034569a", + "0123456237233762468462_0123467", + "0123124553678996636723_01234579", + "0123140150521627012816_01234568", + "012334056207030186239a_0124568a", + "0123402562781597257515_01234689", + "012345563728236860941a_01234689", + "0123456718975a191b31018ca1_6024", + "011223144512141223364748_0134567", + "01200342456457204258454259_13567", + "01200345262026640726200126_12356", + "012134500667780150133734_0124578", + "012304056206578538855771_0134578", + "012320140523678059960167_0134678", + "012334202320353617340117_0123457", + "012334516746415167528552_0123567", + "012334545650768956967696_1345678", + "01234035644067122358014035_02467", + "012340421526014042741815_0134567", + "012340566323435672282363_0125678", + "0123425667606891ab4546ca_135789b", + "01234512062357867806019012_01348", + "01234530567862235662453001_12348", + "012345626787849819306a235a_12357", + "01234564378015450115799879_02589", + "01201301422035136756350113_023456", + "01203245607660140160144568_015678", + "012134235162708601519570_01235678", + "012313456273148729237301_01234789", + "012342054036234207403894_01234569", + "0123434135267884415923a0_0123458a", + "01022340252601170178022540_0123456", + "01023452130667863402677995_0124679", + "0112344567016867576780689668_02378", + "01123454460176383449604601_0124569", + "0120233452678926853a52853b_13569ab", + "012314516272578696239a3a_13456789a", + "01232414156470012823377014_0123456", + "0123345667614872902367016156_02589", + "01234003546401172668260103_0123456", + "0123401556367487017440150174_02478", + "01234042252674422340180140_0234567", + "01234231152331234260748660_0123467", + "0123450256014534234572839a_1234689", + "0123454667788001122339239a01_02579", + "01023045461708018930434a0843_134679", + "01023442345067027202788978_12345678", + 
"011213245012265712135801199a_013568", + "01234052647518899775185223_01345679", + "010213455601783784699a78844584_02478", + "0112345660707809010a9b416936_023578b", + "0120320134204532206307204832_1234568", + "012032452034463407322032344801_12567", + "012034567215638948013456155663_12469", + "0123044225044216011704804225_0134578", + "012312455067017445506774866798_01356", + "012314563001473014234775825647_12467", + "01233415467421897ab70190c923_023469a", + "0123345671891ab323c0250b2b28_1234679", + "012342543065761823650197425465_13678", + "0123450445673879922338566779_0124568", + "0123451460278679a89aa886792723_01349", + "0123453652170501840923364529_0235689", + "012345627438873823621662380190_03568", + "012345637879a16b56199c1951d2_13468ab", + "0120130450011336738493737a73_01346789", + "0120230401350620578223063557_01345678", + "01203456728497a44960b8842072b8_01458b", + "0121324567785001217178842921_01235789", + "0123042560766082680960680482_01345689", + "0123122415121567561501682367_01234567", + "0123145676864850977235233556_01234578", + "0123301401566714789549140130_01234589", + "0123302415236223300178155484_01234567", + "01233452642307183468699a4bcb7d_012456", + "01234025670828297a7ba1ca7a25_125689ac", + "012341153673078449a2b9c29dd6_013469ab", + "0123453056010734304523340758_01245678", + "0123455676856009605652859a23_1346789a", + "01234564787935409a2312016478_01235678", + "01123045674683679879016746453083_01256", + "0112312456472478851201240978566a_02467", + "011234304501633001341247486301_1245678", + "01234015623423014015623401155715_12456", + "01234254657837807823377880018078_12347", + "012342563523675635786719569442_0124578", + "0123425678955a76abca293d6e42_1234579ce", + "01023045160738943a6b931c5c4530_012479ac", + "011213044556047004455845040119_01234679", + "0112343536786097018a2997789739_01235679", + "012003435346036784466718017953_01245789", + "0123245627389a8938546056a775a7_02345789", + "012331040151064723859854859869_01234589", + "0123402506472840499a5449259b0154_13678b", + "012342415001616761416136238495_01234567", + "010223456478457659a559b6bcd5_0234579abcd", + "01231224500106501223012467066801_0134567", + "0123405221014061748640983aab05cb_0123459", + "01234053366137389aabcd9506eada_18a256b4c", + "012341122541126758655867072339_123456789", + "01234526407847790a45644047780b0a_135789a", + "012345501601377482453723743798829a_12356", + "0112304564450157734530016468645745_123567", + "0120345653571787985a170156491798_0134568a", + "0123245663237428247497281474148a_02345678", + "01233001424567702682269aa6264845_1234568a", + "012345607458360123179a4501745860_0124678a", + "011223144512606714682912140129601a_0145689", + "0123045667829539569523398218019582_1234567", + "0123145046013550143523011446674678_0123456", + "012334234562342357344578578978899089_02356", + "0123345627231225085901125a5627127b_02678ab", + "01234042565728744257239a0b49405c9a_01369ab", + "01234567891aab8bc9d88296895d822331_035789b", + "0102011301456772524502787252789001_01234678", + "0120314351060175205889317543946758_01245789", + "0121034251677521510142800151755101_01345678", + "01230456789a3bc81cd016919a090227_012358abcd", + "0123145213235226789313011323938926_02356789", + "01234035678930ab2328bcda73abedfa91_12468abc", + "0123452034234506722006233448342372_01234567", + "0102034524564502240748028948038a2402_134569a", + "011230244501362412300157788924459a5724_02346", + "01123450634701623463126247857850011247_03568", + "01203442566715200134839820423483989a98_1367a", + "01230435061201277804122923277a3b2312_013456b", + 
"012304500216754849507a0175044b047c50_123469a", + "01231450678578390150856778966778855001_12456", + "012334526113700187708798a0261334876a_0123468", + "01234561674678126792616a12bacb92dc51_013589c", + "01234564571886186401927945574564799223_03578", + "01234564761280586401458058456401122376_13467", + "011201234054367840011285693623120140_01234567", + "01201324563501078079130107202401355635_024679", + "0123451678097414a221b620a215161cb64d_025679ab", + "01234546378582601882233945a523424623_12345679", + "0123456786167349a535a573677335165b01cb_03567a", + "0112034536475003455038010301493a45124b_123568b", + "0112234567408912011845400112162a121623_123579a", + "0112344034560140678334407983019883796756_12345", + "01202345160114172028496a16147b20011716_125678a", + "0120345674789545747a93a1bc7ab73b7a_012356789ab", + "0123145001267385509a68abb78568ab9a26c9_1234579", + "0123421450426501502365017687144223763923_03567", + "01234562768489a1569abcde0deb013cdebc0d_02345ce", + "0123456416378270012337167064452382235937_12578", + "0123456507861298932398a386bacb5c98455c_0123469", + "0123456789abc6daed4c5081fef44cgd5h6h6i_13579bg", + "01123041560789786a608b044c04d807308e_6701d982a3", + "0112344567589a1bc93cc99d6071ce0a7ec960_01347abe", + "012304205246789724a21bc14692d18ae87d_1235679abd", + "01231245600156601278893778018923122301_03456789", + "01234354436164541789682689a561645464baa5_025789", + "01234560170158344572237234174558895845_01234567", + "012345637858196a359b3cd149ef4e4919_0234568abcde", + "010213456178797a3a68bc67d33c9e4da99f9e_01358acde", + "01120334500675783950750150066a757b030175_0123689", + "012032042563572820898a0432252832636b6332_124567a", + "012304567338179863ab6cd6e001fd7aag4617_1ce7530a9", + "012334567517400140756856342317751756014001_12678", + "012345366758147936822336674579823658452382_02479", + "012345466378920ab448c5866345d4b92302_023456789ad", + "01234546716001829660a623b039237bc2dcaee8_1246789", + "0102342564071489ab9c126a2d83e0fb64gf91_0234578adg", + "0112132412501667245839011613015a3b5c50_01345679ab", + "0112233456452312700156701223344568566845_01234678", + "0120324354678695185443320120320143865418_01234579", + "0123145220232001362023372018011820233920_01345689", + "0123453617845245846901905223693669235290_03456789", + "012345673889abb88c890889878d7445085e_012345689abe", + "0120130142200546174220485905010a46ab055c05_134567a", + "0121345165637897abb8cdefc376b7abgc5e855h85_1248acf", + "01230430566738098a26673bbc38232d8eb00e_0123678acde", + "01233454600347899a7a47b8c2de8515bd8589_12345789abd", + "012345266427400826644045095a452b2664264ccd_013478b", + "012345340645175872231701177223453423457217_0245678", + "01234560789ab2c79bd2e9fd5ghii5gdjf5k0ed8l6_13489bh", + "0102134567648298262a8a264b3c2d64124eafg8_0125679acd", + "0102324305260278327978ab744c9d8ae0c18f_012345689bcd", + "01023456725328398a483bc41c924d7214e1f1_012345789cdf", + "01123004056761873039301ab01c711a7de7fc_012345678abd", + "01203445413678977a2b0c010cd2edc8bcc8d2_02345678abcd", + "0121345678973a91bcc77d62e4bf5g9740ef1h09_123458bdfg", + "012340015465721701893889237217014054011723_01356789", + "012345167289a52bc342d712e1fd2b95455ge814_13469abcde", + "0123456782745045199819011950019819829823458219_0346", + "012345678309263a231a7b1cdb1e7f31ageff5a7_023569acef", + "0120130456787696a0010b5008cbdaa00b9ea0a9_0123579abde", + "012334566735564829a91ab6cb35d2342935a9bdd229_024679a", + "012345423567716713712086677109a8421335866713_034568a", + "0123456780521639018098233980988001166716012380_02478", + 
"0120134501678456458967847256672045566772200113_134789", + "012113456789a4a878b71c212ddea48fd745cg1c21_g1f26b8c95", + "012334456241789745a474a42b5cda235e355f3523_59b3ae40d8", + "0123453674184118741930a4baa00cda01a1190e_01346789abcd", + "012345624789949ab3c0d394d4474e9416b36bf994_2c867e09b4", + "01234562787209ab0b62bcdae00b8a78f4700951_012356789abc", + "0123456786709ab4c30dcea360fcf40gceeha0a3ce_1247abcdfg", + "0120345326017668929a0b2620344cd29234ca266826_18a29345b", + "01234156783996ab39798ab3cdbd78cdab8a1c01785778_02358bc", + "012345067889abc0d4e7d3b68f37g2c27hhi17j2jkkllm_035689a", + "01234560170108890260464a4b60088c08604660088d60_13567bd", + "0102301452467889a1144b6c4de8feghi4jgkijlflaffl_01279adh", + "010234567897ab2c7d4b477e14b6474f29ab14ga7eah_06b83ag1ed", + "0112034506473803690150033a0306011b1c454d12_01245689abcd", + "0121134056789a1bc16d9ce91fgh2h69890201560i01_123579befg", + "012334353627613890024a27b42338bc4dec90c49e_012456789bce", + "01234250060758599a582078b6c3d40bde3eef3ec3_012468abcdef", + "0123456706801901041928452a80284b4c1d012e4b_12345689abde", + "0123456778953ab5a9345c0d8e8bb5f8b00gb08b4b4c_e5f7dc0892", + "0123304223354657420830354923420a46302308424b3c42_134689a", + "0123452445500645272445505806890a5b500a585b8c_012345678ab", + "0112134526731226849a695bc7355b1326121326d3ec_01346789abce", + "011234504673654634466550872901122901506546347334_01234679", + "012032435416755443012032430168168901203243544332_01234678", + "012314456738529001692367144552389014450114014590_01346789", + "01232452367893a08bc013125a3deaa05a3f9398g89893_0134568bcf", + "01233452054652235278013495466a52234634466aa76a4652_123589", + "0123401556376801741501403701562315013792569a7440_1245678a", + "012340156436642301157240236436237289977289233664_01234589", + "012341561778099a979a788b7897a39aac977867cda3_012356789abd", + "0123425467892a42466b462a0554460c2d48422d050e_01234569abde", + "0123435456600789392ab40c252d5408ec43566f5625_0123456789bd", + "01234543657880595a9b097cd0e70959f101459g099h01_124579abcf", + "012345675638596a5b3c86de6738be13fg86383dg5f36h_cge749b021", + "01203425167869a19bc3de9fgehc1hif7j1j01cijk0dc334de_14689bd", + "012034355678795ab56cdbd008e6fbbgha5ab5db83_0123456789bcdgh", + "0123451261728779127a23a4b8c4726d7aa4967996d060_01356789acd", + "0102234567829ab8c83de4835a5b5a682f4f3g23022fh04f_014689bdeg", + "01122340546512764001654012546576402301382389120140_01234567", + "01123045672653014548123045530145306701122667799a67_0124578a", + "0112341536273829122a30151b015c15381bbdbe1b121b_012356789bde", + "0123405647126201804094a947942b470c7d2ef247fg94gh_9e43a50718", + "01234536752889a8b2280302454cd2ed453f5db35bg53f45_b143a6fc05", + "01234542632372483123879887a97287b29887a97287caa99887_013456b", + "0112231240356778011240847867593523124001122335594084_01234569", + "012324566735705623891aa801671a7001677056675635562370_1234589a", + "012340567839a267b67c6797bd567efgb2059g7c699g2h6db6_02458abcdf", + "0123455067458612919a6b0158cd865e5886c56fg21216126b_bad6g14e37", + "0123456786749067a3b886bc8defdghagi59iejhjkgiglljjkk0jk_12357f", + "0123240546375208232905529a29244b9c290d3746050d5229_01346789acd", + "0123456507689abc8dcea7a3df9g0hhijkel6mmncjloe4jepjp0_134578abk", + "011234567001356770671228125667958a0170122812670156677067_014579", + "012314524670011438706946877001237052873887700114017087_01234578", + "012342302301154201678930964a01891567581501422389964289_0234789a", + "0112341356704802019136344ab44cdeb4fddbb191926g36_de0c217f9b643a8", + "011203456786739001a1403b01c573676de603d93f73g63h0301e6_123568acde", + 
"01234015640140521501645223375240892315526415a6238a896401_03456789", + "0123425674173623809a01aba7a242c217d3427417e1abbeab23_0123456789be", + "01234556040768455604524569045a2b52ac01560d52682e525a_12356789abcd", + "012345646745869883abb998ab0c862370676445700c5d456486_012345678abc", + "0120320420153267011806393a206b3220066b3c6d322006670620_0234678abcd", + "01234256678798a0800bca2ded3e3cf87ddga07087ca0h7da0_123456789abcdef", + "012345367890ab6c7de22fd1291g5dghfij2aedkjl29imlne2no900ggp_1248bej", + "0123456764524528695a6445ab5a522c5a5052456d6423456d5001_12345678abcd", + "0112341513267801797001155a782b1501703c7d011512133413122b_" + "0124568abcd", + "01234205546705082942544a05ab540c46054a6746ab54056d460c4a_" + "0134789abcd", + "01122345467089943a612b3c231d12e11d3fg380hb9b233chb2370_" + "01234578bcdfgh", + "012013453652789a1bc93a930bad366e6f208g167h6ih67hj178_" + "012456789abcdfgh", + "012034561345560120341345344572560120011372346845564568893468_" + "01234678", + "0123425001678603941a18b4ab1a12bc2342d686b4ab1858d51aed58_" + "12345679abcd", + "011223145678984a456b7cd1ae569a0fag799a01ah791i2i5a016b_" + "012346789abcefi", + "0123456172389aa33b03237275c101d99a3ba33efgdfh4hgida3a0_" + "012346789bcdfgh", + "01234567829aa66b369ac99d94e92f94dghc455i3i26d323cd2f67_" + "d74f931ha25ibcg", + "011203452601070824692a12ab2a266c244d012412016e262469ab4d08_" + "012345689bde", + "011234560748941ab0c54dd11e944d016d56c5b0b3cfg3b3chiccf_" + "012345679abcdefi", + "01201301133452204613011334204613527552012001135234754668894634_" + "02345679", + "0121234565761821901ab3c0cdef1gh2ei21c6efhjcd1gh25f90hj_" + "01235679abdefghj", + "012304356217869823793562865a9835236286017917627998233501046286_" + "0234569a", + "012314560701143898a11bcd5e2cf1231g2chc2ciaa4426362a43j_" + "73b248ac15i9gdeh", + "01234506476447420158790a0647640b45422c237d4764060a0664477d_" + "123456789abd", + "01234567786736297a64234b36672c64232d607e67364b23647a360160_" + "1345678abcde", + "01234352236107527890a61bc4610107569b52612da6dc90528507788552_" + "0134789abcd", + "011234015640077819053a7b01193440341c011c3d3a050740343e344007_" + "01234679abcd", + "0120130134451367782082782001672013823478132001208213786796677820_" + "01234569", + "0120134256570148054220590aab424c204220050a20ad4e42200aab5705_" + "01245789abcd", + "012334524623788097159a01781534529715800178809715522334469a789778_" + "0134679a", + "01234056607856981ab11ab7b1a32c7de91ab1984084234098b77dde7db7_" + "01234589abde", + "01234225424617489ab4c4b4abb22db2aba19ae9179fg7677hg7fie9_" + "8gh45093eic627ab", + "01234516027835975ab9359cc4d401ce2cce23f25g270292hd7845f2c47cc4_" + "a816fgbe25", + "0123453621178983ab3c2321d19e59dbfa3g0123h2aijdihd121d1jd_" + "012345789abcdgij", + "010234526040178991a3344b343889910160c696d6efe8890260026g1h_" + "cg9467a83e02f5d", + "0123021456617848392314a0ba0256c5239ca056dbba61a0db023e2302a03e_" + "012345679bde", + "012314563572802356019a49b77214cbb780d88535499a4943354785b7aecb_" + "01245678abcd", + "0123243567869a9634ab7cd7676a8618144eaf4ggaah6a3d67d7676aah_" + "02456789abcdefgh", + "0123431415566789369a910bc0dc822e153f0b23b26g6h56fijf0jfi6h_" + "01345678bcdefghi", + "012345603718297a01049b1c3d183ef3743e1g41e4hd1g3e23ifh32329_" + "db86h09271ia5gfe", + "01233143536517898a31172823ab2896438a04c54c6528c2964c04d0044cc228_" + "012345678ab", + "012034566718799a199bcde1feg1e8c5566779h6fe739b9i79df9id37317_" + "123456789abcefgi", + "012034567849175ab39c01d6563e34f349340a50499gb33h6hd634499i34_" + "023456789abcdefh", + "012345400672899a3bc71c23d6ad89d66538d63e897223727dd64501403b3e01_" + 
"0134569abcde", + "01203204015126477804193ab3b23a266c9d26aef520044g5154a5h151f5h1_" + "60fha95b3478g12", + "01230456170189a07b173c7d2aaef8gfc7e5ae2agh7bbi23dj3cck6h6l7bkmmn0117" + "7d_023469a", + "0112134035011627403801494001124a49014b4001132c124b166d1638122e1216_" + "012346789abd", + "011231345657363456806940343669349a4bc5dcc956366931018001c93136be56_" + "03456789abcd", + "01123404526786986ab0049c86826d2e98121412fc262eeghf8698iehi6df9_" + "0123456789cdefgh", + "01203453367585099a6ab6ac504d64aeaf75g04d9ab6hbhi098bgj9aghhiaf_" + "01345679abcdfgij", + "01233445673724789a9bc3ada22437e0c34513fcbfc44g34baeh13ad4ic44i_" + "01345679abcdefgi", + "012340356476871976647640582a8701352335584064872a76644087580119352335" + "01_1245789a", + "01234223255637161889abc2d1e1f21g188h8i012ee8f06a2ec2jci9c3358i_" + "c368hgi5a7f4e910", + "012342256276862972a776b008ac8debfe6gb442eah7aceb62ib2362b447h7_" + "iae8f64g71c309bh", + "011234564770286334015647126370479a9501703456633447563495706356019a95" + "5663_1245789a", + "012334455678934a12255bc0d4e3239f09c0c18145gdd4gd5h12ia25dj343f4a12_" + "j5cebg9f4d73201i", + "012334564728696abc3d133aa413acec86a4f8234g86344h6cbc5i23j35j0b475i_" + "012346789abcegij", + "0123451670017870120179702a01147b166c4d7012144e1401122a707b1612231216" + "6c_01345678abcd", + "01234564786790147ab1c6d76764e6bf3bbff515g3031h147i676j3d67d73d7ig3_" + "0a1fgi4d928b6ce3", + "012023013415678549a0b6c2230120d034a07a67a015ed0115a07a6723c2dcc223ed" + "8eed_013456789abc", + "012324506517890a9b9c19d13de79fg823175381h09cg801814ih024j2244i8i81d1" + "_2bj39cdeh81ga4i7", + "012345567870922a233bcdd2e2cd438c15f9g114g13h242acd2392233h3id3cd8c3i" + "_897hi15bgce304da", + "01234252657253879620653a9673533b87c5defdde53d86g9hc53i96ehj001j0j996" + "6g_1346789abcdefghj", + "01234256371778233617917ab22cb2de9fe9gd3723h2de7abighhb7j23i1911701e9" + "179f_a3b645die79g8fj0", + "01234354306785295247ab0c43b4d8302943e90c4f0g3090a40g8hhi8hd8da9jija4" + "434f_c5072g3j9fhb1de4", + "01234035640740189a0b07bcd7d57bbe64f4gf7124h1bi1bfhh1fhbigf4h8j18241b" + "188jf9_4j30hg9fa72i6d85", + "012023045016011678793a3b9c9250cdd6e6ef8gh9hci792e103231392793j92233b" + "139289i7e1_06h9725b3e1fgdic", + "0123415212644789a8840b47c72de8f0851712235250g0f0afhe7i17eahe7iaffbgb" + "122j12fbaf_064hbf391ieg75dc", + "012345645789a6ba15c6a01d5ea6355f396g139h1dg80da0i13935a68939c615i1id" + "43jca00da0_2ce76fd53jbi9ga0", + "0123145678491a154b14bc4b201517144dde566f15142g5615fhbi56ijbk4b6f4d14" + "56150120011723_0134689abcdfhk", + "0123424567895ab4bc4d0e8fbge24542fhc8gibce25j5k8945c85alb0l0e8ffmb445" + "5645bgb4422n42_0134569adegijlm", + "0123456728197ab26c67de20266f7g1d64hi017jdehkhb208lb2hbhk1d01hm20hbdn" + "671d7o4p64677a_13579abceghkmop", + "01234567893aab5864456c89de8064585f5g52233a8h58528d6723588ddi8d0j3a58" + "803kal585258800j_0134579abefhjk", + "01230224561778392a23bc7de0df354g022301173524bh4i233j23023k7dblbebl24" + "e0014m02be177ddn7d_1346789cdegjkl", + "01234567860469ab525c230d5e5f5c2a1a3g0186c80h8i3j232a5c3k45862l5fab04" + "451a5223013jfmjnao_0134579bcdfghj", + "0123452647892a232bc87ddecf7d475a3gdhd2aidjakjc233l4md245232a5a4mdjd2" + "aneoc8308p3q2adh5a_0134589abefhjkm", + "0123245678492a0b0c1a0152de566f56b54ggh56ai1aaifj4g0k6f0156fd240b2l52" + "6f566f522l5752fd244m24_0135689abcegikl", + "0123453067189abc5d144eec0ffg9hi2jkg53lm1lnh35eo601kbnpqn677brm1k0fs0" + "tq2tbc3lcu7bk7ngvnln_012357abdefghjopst", + "0123242546405768690a2b7c259dbef2245746cg6h407cijif46cg256h245725f246" + "577k4l24014m2b4l2469469d_134679abcdeghjl", + 
"0102345627687318198a1801029b1c1d5e270fgh0227345i19019j8k7l199g8m2702" + "8n19i001fo19gh9g271di05i5e_02345789acdehlm", + "012345161762781917ab8ccdce2f9g78168c6417ah627864ci8c6ja0160178ak19h4" + "4l01a0012m622n1716ab456478_123569bcdefgijl", + "012345565067620189abbcde3f1ghij88kfhkc56cil5mf3aenfhnoompdab56n33aen" + "j77aj7j80j89qon3rns9tss9_1234679abcdeghikos", + "012345676268492abc6267ad2e627fgh4i9aj62kel60eb4j012e621mj61g6001bc60" + "eb672een7f1g4i4jeo2a452ej6gp_01357acefhijklo", + "012314056278925abc5d6efghiajcakl62388ham38j3013nopqcc94r9314q5pk05ro" + "klopsntqtbnu01hlg0bvsnuwfg9j_1234678bdefiklpqrs", + "0123456137505223893abc0d50010bef1g848h5e505eef5e0b0d457i238486bj0152" + "235k4550523723523750010b1l010b7m_013479acdfghij", + "01234562783923a0b76a949c85dce67839fgdh9ci7j3cklj6b014m9nof5pejoqrnsj" + "n53nj33n5pst78ton8of7u0ii7n4_12345678acdfgijkpu", + "0123456572589abc3def14ghij5bdkb3lmmfgn4o3d8pimb801b35bn8j3ijqignon3c" + "ako55bkrcsmtj3ij3c23u2j3imqivq_034569acdefhikmqst", + "0123456782298a3b5c829d2e4fg423453h8g2979g49i5j453k5j806l82238gg42982" + "8g9dm86m6lm82e82233k23804f2n29ko9p_123469cdefijkln", + "01234546735809abcdb59efbgchb2ijiklm2n7op2ih07qqd6rm77q23qdoe7323fh09" + "a4fa8qqka4fhksthqke27q1746238q8c_0234689bcdehijlpqr", + "012345677813953ab46bc9cddefegfh35hijd567jklm5nkoe2b4d5m6defegf45fpe2" + "2hqnhrnh2hile2feqslmfpijiltpjk67_0135789cdefghimnpq", + "01234553672890abc8d5364959d59ed21fghij7kl9mnno876790b8p70g6g7qen0g90" + "ab0gj4iag187c84l1fj47k67p76grplssm_0123478acdfhiklmp", + "011234567089a9b3cde3fbg3fhiaae8jklmnopoqa6rs7mtu4vw7nxwkuy5tjsz9p3dn" + "A57Be4w7zC5tDiDoEf7mB8mF7mlE_01234689abcdefghilmpsuwy", + "011213454056077845914007a440014bbc4defgh6ie9ijef56eaeka440077l914507" + "564007017m07011n7g0740456iio56456i4007_01346789bdfhilm", + "012023141567891a01bc20d6a81a2eabbfag8h6i5d01j8d61k2j016lbma820j8ab01" + "23155d1aa8j814dn155dao1abm15abbccpkq1a_1234579aceijkmn", + "012345611789a8b13cdea6fg40hdij01kflmnl0oen3pqairs9f5nia6itubnq61kucq" + "pu68qiu4013p1v3ccqdeeqhdqj9weqv90xjs_0125679cdghjkmopsx", + "012345678549abc4d9ec3fghdi6j499kk7lmd9nlgcodpj7m5730qnr66j8riqrs3etp" + "ueknvi45e8ug8rr55krsueiq56e86jbswbwx_1235789bcdhjkmopqs", + "0123456789abcde6f6gcch57icjhbk1aalm05n1f0f21659of61fefp6bil9lbqb7rrc" + "bi1a679sp6epef1fef23kg9kp4epeffq3tt88u_03478bdefhijklmnop", + "012034526728594a5b20935c59346d52758e4f28eg4h8i9359522j3k34758l677552" + "935975282m8e6n6oo0280p8e6n525b34bq93344a34_1468abcdeghijln", + "012134305678349ab3cb4decf8gh6ic5jk3leg8k09b3ec5mc5na26b0opq7krse4j78" + "mi8kebb209gbkrgh5mgbrtmupqhvkr6w26b2gbog_12468abcefhiknpqrw", + "01234564678669400ab89c8664407d67debfeg0h64427d67b8de647d67402i64dj7d" + "bkb8bl67863m42b8en644o6964867dbkde0p40427d402q_12356789achiklp", + "012345655789abcdefg8h5hi1j6hk11chi511jlmjnoap6ekalgh2q231rst85qugvg8" + "53wsfxw4oyk15zmA45mtsxsBls3Cw4tD434E43_1345689adehijkmoqrtvyAC", + "01234567689ab6c9dc1bef2ghijkl73men67opq7qrps58st5fgl0j7s7ubv1bwxnuqc" + "58l7jkko4njkqx46h4ylwxj2zwxul9qc67464e_012345678abcdfghjloquwz", + "0123456017680439824a012368826004bc0d4e04391f4g683hbi0123822368041j3k" + "4e6023l6011mc16nbl826ol6opblbc6obq688260043h4a04_123579abeghikmn", + "0123456789a27b4cdef14dg33h93ijjck93l93k0manoo4hpl4h13lqdg390ag2l89c5" + "hqld5ern0fc5mr0h2l7nsm4clqtlnt2tlq237n3hh17b23_124789abcefgiklmno", + "01234567089ab4cd2e1f8ghi0hjklbgmno1662pqrsnt6nfuhjsdsvvwxym9ez868Aip" + "pBtqipCab4ya45D5q5yErv622tEct4nbFGxyhxqFGw_0125789cdeghjloqrvwxzAEG", + "01234567897a3b2c1bd623ae2fg467h3ij7aklmn0odp3b78jqenrserteer23ruv7ar" + 
"w7a9idb49bkhxmm9pyu5jw737hw7zAqBjqzhhCklijiv_" + "02356789abcdegjlnoqstwAB", + "0123456789ab355cde6001f4gf86hg72ijb51g9i2k89jkdlmbij6mk3no9ikpidjp35" + "3qm22kmab3r6afmab44ois6m2bs2b4b3notnutism2no234om2_" + "123578bcdiklmopqrs", + "0123456272895a181bcd75efag0e9h8aij01kbk6lmnidoc81bp00lbqrs8tuqcd1c18" + "5defvtwxeccdmyb2iknwbzemAnBe72emxbkrCkb2ldnCCb_" + "01346789bcdfjkmopqstwyA", + "01234567689a6b5cb26d2e2fe46b2e2fb24g455he4f9fi2j2e452ff0e42e239k890l" + "f9f09m3n0ofpf0oq23fpf96d012ff90of0f99rf9f09m0l_" + "123456789abcdfghijlmopqr", + "0123456789abcd29efgaheiej48ikll5bmabnoappqkrstkl9o97u88ivnwx3m1yqznA" + "Ba26aAlaCp42uc0udArCnoieptqxyd26bw9bi9wDEi9b14ab_" + "035678acdefghknpqrsuvxz", + "012345602776581901455a609b6419011ccd4e9f4ggh6i6019ij6i016064ijgk6l45" + "4g1cm5gn2o4g270145641c5p45764q604576gr4g272m2o5p64_" + "01346789abcdeghijlmnopr", + "0123456278590a1bcd5a2e01e1f3gh3e0ai55a6jkilm3cmne0m4230gj44iioj4p08f" + "fqjpqdljqri58f2pstghespukvu5kipu23fq5982e0es2p8fpuu582pu_" + "135679abdfhklqrtu", + "0123045267189a8718bc59ad8e2fghi6j59kglilm8f6no9m8pm859q0bjampo0rsn9k" + "k0tq9kjuepuv2a4efi9aj2vwafctxybcqxtq3fctc552v33fc5233ggh_" + "0345679cehjklnprsy", + "0123045678193a8b1a9c1d9e07af783a233a1a01g0ah2i19072j8k011aalam2g78g0" + "019ekn9o04191a4p9504199o011dam2g8k1q07kr78011q8k07_" + "012346789abcdefghijklmoq", + "012345677869a67472120b9cdefgcfc5hcij5k452l1mn1ok94pn5g5kq7pi2kc5ri2s" + "9t5gusojvdwx1dypzp42pryAdByzCu12xCzwn1zp4DroEvpr1d_" + "0345678abcefgjknoprswxzA", + "0123425674789abcdef4ghia4hj6klmkno7469p19qr8d7igsp6tq2suv914sidep1aq" + "q37c9at7pnvmawxeyzz77AAB8C7Ajs1DAEF79a3G7A8AF7AEyFF7_" + "13456789acegijlnpsuvyBCE", + "0123455637458498822a205bc94584cd8e2f235bg00hbi4582bj5b2345g0840kcgg0" + "20823l23cmc982988nc982cglobpnq3l0kcm23823r8nnq82233s82_" + "012345789abcdegijklmnoq", + "01123456789abc1de4fag9hi2jjkclmnelfomeelp70q5r9a45esjtsuv72jqbwxyubc" + "kz95dAlBCpdpn5mnmenhuDbjgnhiesjEFBlBxeGF0w01aH1d2vv995_" + "01246789bcegilmnstuvxyAF", + "01202345566784397a7b6cd0e1dfgh46igjklifmino931p7cqlrrslitunvswaxf1yd" + "ryhodz8l0Av9dfxBtd8CDh8p62pyCj23ydsEd0sdghg2200xd02062_" + "0123456789bfhjlmnrsuyADE", + "012334506789abbcdbedfcgaghbc3b1ij2k301gd9llm0ndopnqk2o7f0kdodb3b9rf4" + "k3st5puk3fuh7fhv7w13axgh0y5uzsuAkBxCobqgukto0kobDq8D3b_" + "0134568abcdghklmnopqtuvA", + "01234567189abcdef2gf243h23d27ihj3hkaj73lm9nkkjhjh6gbbo6pjqh6hj1ron3h" + "cn0ce523s9t0de1siuvtpid2wpgxcopio3co1r0r0c3lvgjqrkkqrk_" + "0125789abcdefghijlnoqrstuvx", + "0123345647899abc5dbe8b7f388g7h3447hi5289bj233038309k7ffl7h9aem013034" + "898b47389n5od9895d525o23d98pbe3q34479r89387hhshi3q8bem9r38_" + "12345679abcdefghijkmoqrs", + "0112342563745879346a2bc09d7463799e9f7g340701799f124h2bfi259f0j07127g" + "6c749ec00112076k7425015l12254h5m6c12012507124n747o4p0701c04n1225_" + "012345689abcdefghjklmnp", + "01123045463401276830935a45125b6c5d014ef5341gh993h930hi463j2k013034hl" + "1gcm6n466ccmhf343o12016p3001122qor34455s4t12013045344630016p125s_" + "012356789acdefghijkmnpqs", + "011234560768507956018a9b68cd1e505f078g507h07565c7i685056075079j5683k" + "8g0107glam1n8g56688a567o68500107561268p1508g5ccd3p5c500q3k0r01p1_" + "12346789abdefghijklmnopq", + "0123456789abcdefge8h6ijklggh74mfnop0hqgn0hrhst9jfuqvwxc2rg6seyzllnlw" + "4AB9zl2mhegdefnyCDB18tdms4Ecp8yFfuC4dq1bs88hhGnytk4ns4te4A4tC4s4_" + "13579bcefhknpqrtuvxyzADF", + "012345465745894a7b579c457d57507e8fa86g0h2i2j8f2kk446hl45575mi74afn45" + "506g89a84a0h8f457ok42k57500p2j2kk4a8454q4afn010r508ffs8f4550890r46_" + "1346789abcdfghijklmnpqr", + 
"01123456570839013a6b918c7dbef5566beg50bh08573f56f5508i08011j8k0l6bem" + "0108568n0lno1p085q6b56be6ber56500sbeem0150561t6b565001088n08011j08_" + "01245789abcdefhjklmnopqt", + "012304567897abcd4e8fghij6k90l2g26m6lnopmqrdobs0tusjqkv7wmn6mabta766m" + "cdgxyztl04rABrwk6yeu56tCDgDinEiFkn1g8w016y1wwk1l8wbFli8fgx1wijus0D_" + "013589acefhijkmnoqsxzBEF", + "0123456789ab8c9de4cfcg8h1i3bjaklmn0opqjbrfsl604t3iti2tuscfgvhwax9gym" + "z801g0xk8gAe8cvaBt6BcfApabmo67e6dCmDmoAezA9d6089rf0o8ccffEcf60Appq_" + "0123479acdefhjkmoqryzACE", + "0123456467178492abc1defcgh2gijke5lmn4lld7ode9217pkcm904l7q34m7brld7s" + "amjctai9gh5luv922gw592sdhxam5li9ghqlijg3y37ocm0ymo0ctuft01jffttu_" + "01235789abcdefhijklmnopqvwx", + "01123415677819ab9c305d36ef5715347g1h573ihj5d5kjl15785d1hdm01deef7n5d" + "hj1h15hj5d1hlo36jlhjlo6736jllp1h157q30jl577q19dr5d5s9cdt15jahj1h5kdr" + "hj_01245789abcdefhijklmnoqr", + "01203456789abc7defg1hi8jkb0k15klmel3n86o2089563p780kkl3o5i5qh8crhijs" + "ometsenhhjc36mmun72gqmjsgvmtw5g17dn7nh8xh85q8j1wwl8x1w155iyh211y56_" + "02345679abcdefghiklnoqrstuvx", + "01023452672890abbc2de0fghi3fjk9l1mno01pbq352krqp5kcst3b54634uq4cq4vw" + "exydcztvjAdBuh0dCDb0Ek5k6s3bsD6se0fy46524faji046hafF0dhpdBpotaoGpohp" + "i0ae_0123568adeghijnptuwxzABC", + "01232435236738294a232402bc011d4b24e0bc0ff67g024h0f24023ie0jk4h670lf6" + "23me4nmop302230ff6p3me3qe002me2467mr0f2s6j237gf6me0f02240l6j4ne0017k" + "jk3qme_124578abcdefghijklmnopqr", + "012003245206203728099a2b2009bc9dce9f2b2009bc9gghei033j2b0109205203k5" + "clbc9g093mkn52o3ce2b3m2009bc52k52b52k5kpce2bei9gkobcceeq3rce03bc202b" + "2003o33r_123456789abcefghijklmnpr", + "012324567589ab6cca9deffgg9h9ic3j9klmnfo7p5h93qnr56k6snamtd9duag7cavu" + "diiw4vkitdvxnrg95vvyabr289nrizfg9ig97k9dfg75d1vxsf9d9iuaf7e8efp5p4iz" + "p57k_123456789bcdefghimnpqstuvwxz", + "01234516531789abc4bde00fe09gh8fijk9lmldndm0fdopqp2ri899p9l23iks8p245" + "mltets69s8rie0rufrv5bw530flpe0dmgv9gw93x45esrchahshac4abyxv3gpjggppq" + "kv3x_02345679abcdefghijlmnpqrtuxy", + "012345467871896a278bcdeccf57agf4hgi3j4klmh45bnojj6p6aqk8783kragsit57" + "7k013u0a50v5i360pwhgjpoj57jpp6dvgswhsx607k011b50rapwbnwmwrra0a50xnsx" + "klgsag_01346789abdfghijklmnopqrtwx", + "01233040567889ab7345cdef8e78gahijk89el0h24kmbkabnokppqrqhrlhej9ansbk" + "kppqo2tgbu301v89cw78bkgc1x7e01kp30abo720s7catgcwca1xstab01s70ltg73s7" + "sts7lj23_0123456789acefghjklnopqrtwx", + "01122314563745481214905a454bbc7d9e236f123g0156233712141h4b233g14451i" + "bj145a5khl7m4b1237bj149n1h2337bo7p4b90bo3714455q01hl905k122337147d4r" + "12144b12sb4b_012346789abcdfghijklmnop", + "011232456574189abcdeef45ghi0hjck8lmcno270201183pmdqii3o374deah6ep6om" + "erqnns7t6eurqos9vuwuawefx6gy2x9gs9eznskesanoawwkke322732gyo3ezhwobmc" + "cwmc6eer_01234578abefhijlmnpqrtuvwxyz", + "01232024560607280669066a7b7cdebf20eg4d06dh077i244d6a7bj5208k280620l2" + "8k6m56jl6nj506de6n20eo24204djp24jll224jljq07jl4dl224de4d24l206jlerde" + "7s5607202820_01256789abcdefgiklmnopqs", + "012013340135130617787901a2ab3c6d0e17dfag0h0613796d7iej3k0106dl6dm3ln" + "20amdl17a2011o13lp066dm3am7q177idla213177qlr0106133c6d130106dlis7i01" + "177i016dis0e0h_01345679acdefghiklmnpqrs", + "012342454223067839ab5c9d50233942ce5ccf5g9hei4523425g235j0k4al4454m5c" + "3945cn239hcfce7l5cab50co425jp0233q235cl4422350qr3q5cceeip07p7s0t7p50" + "454a234245500t_013589abcdefghijkmnopqrt", + "0123456758971a1bcdef6ghif667jhd6cdkelkeflm7nmoh23f67pbdq3di36rsqt3us" + "avke2tiem0k0kewx2i42xik1ba4536dqi32iavwk3dpbdqopiei3td6gdgtdt3k1pb5t" + "y2588u5te11b_012356789abceghijklmoqrstuvw", + 
"0123245674189abc40de7fe901fgdc1h459i8jkl0m31md23gn0mlo40bp18014056dc" + "q2klrpskdt6beubp45v9tk746btu0v562445wi403xjw30wi0vyjxzg6ve5d6dg61vx1" + "de1vv99u9iwijwwi_12346789bcdfghijklmnoprstuvw", + "0123456782894abcdef4gfh07aai74jaklmnabjopqrgstnd0duovu7jd7qj1wnllgxt" + "uondpqpyoigswprzmnnlpv1wlg0nnd1dstzxicoiuo1wgs7a01d6b3gf6fg8d6wpe71w" + "1ee7def8eq7aabde_012346789acdefhjklmoprstuvxy", + "010213455601047038971a1bc39d3e139c9d1b3e456fbg1h04561b019ibggjc30k6f" + "bgbl6m13451b1h013n13013obp041bkq56c3450170971bbp6r56049ibs45011b016r" + "700445705t97tm9cbs974570_12356789abcdefghiklmpqrs", + "01023224567892abcd2ef3dabghaa0eij3021klkgl01d9im90ngop2eqjb0j9r8sbnt" + "7ukp902o0o322evmb0snf3785fqjwij9f33u7u93ntqjxd903uuyq5imywpv93vm566r" + "56q58ya9bgabdaa993bgqj3u_0123458acdeghijklmnopqrstvy", + "012345343667789a9b5c45345d369267ce45cfdf5d34366g7h2345gi347j3645gk6g" + "929a9223jlgm363067347j6n6g5cce456ogp6ggkgpcq6obg676g6n7rjl675c343623" + "gp929s92236g673t7hbg45343t45_1235678abcdefghjklnopqrt", + "01232456789abc6c3de2dfegghijibkl43l3elegg55m6n9opa4q070rrisnldl75t7i" + "qsucncvnwxyj1wk0zA6bfBlCij5CCbBD248EbBEAF9eltF9Gnae2ek7ibcdHHfIancyF" + "bJ2401zxyzyFtFkK_0123456789abcdefghijkmnpqrstuvwxzBFGHJ", + "012342526738294a9b5cdef9agh2ijeklmno521phqprpffics1tuvnwlnun2lc36c6p" + "1p12xhx1ywcslmh5zlh2Ay2l39ky42eAmyBC9jtDnw123up32942pBEd1fpf3utFlmCv" + "voFmlu9ntFnGuvluoGln_0123456789abcdefijkmorstvwxyzABCDFG", + "011230044567082901041a1201ab2cad2edf121g2c455h631a04450412hij25hhk12" + "014504455lam1aad1g30456304451a01308n5h6o636730450p1aaq1a08hi0112hr04" + "2s455h8n12011a4504011t455h010phr_012345789acdefghiklnopqr", + "0120345676879abcdef9dghdgihjekkl2m194nao1pfjfqgrf9fs192t0b011duduv5p" + "6awxy7yzzs9sqsakaAeBCb8676kDqEbFpF0uakGHaAHtxonoCIqspgGJdegrHy9A209e" + "yK79Cbbpthy7dgGHeB_0123456789acdefghjklmnopqrsuvxyzABCFHK", + "012342567869abcdefghi7jklmnopqdrsjjkit9m1kotk8787ppuvh9wxyzAxpk885B5" + "vB1vA0C6DExyyF3GcHuDp3cd7pIJuKLcJe69j7Me23HN9wIE0cIJOPEmLbb442GMJw6l" + "23b4QOLb7pPELccdLcEeb4JeOP_036789bdeghikloqrsuwyzBDEGIJOQ", + "01203453673895abcdde8ef7g2hdijak38h6l321jmnflok08dm016hdb31fpmhqpgrp" + "21ab6qqsgt06l338akbc5bbcchjaabtu3vm0b3abjaachq0h8diagmdq20mkacwxgtkc" + "mk8dc8pmuyt2m084066w060huzpg2zm2_0124567acdefghiklmopqrstuvwy", + "0123245672289a8bcddefg0hi41j4kjlmin53o0124akcpd7nqrs7224q0dt6d0148i4" + "8u235vq556v2vwdt6d281jxikyikzo5h8uxitzxww2tsdt6d01f5ts0h6vhv5hgc018b" + "23fgxmgc3onf23m9w2xmxw1x1jw2231x_01345789bcdghjklmnopqrstuwxz", + "0123415617258968a1b4bacd017ef606406ghijk3f06ha4almm4m006nb6ggklm6gao" + "opfjqm06rj01a1ba2f177989fj41sp5frt1uu7pvrjou8cnb1u01m550011u7wx76x5f" + "fjf66xnmspis1xioqma1haa11xrjx77wvw_1345789abcefghiklmnopqrstuwx", + "0123345002657890abcd372718e2f102e2egg3h9909c1i4jkg50fl21kmg3n923oddl" + "21f1po5eq4dr9c34177j171iistnn51i5eg321uee221cff11ilii8lsdluecd8vi89c" + "ukcfw9pcpoxwcf9f6u65oy6uw9wnpo9ffiia9f_01345689abdefgijklmnoprstuwx", + "01234253678229abcdef8ghidjc8kclmcd8lc8mfl1jnfom6p0djnqfr4sp4p882t9au" + "vhwn85jvuxdj7x96syz9Aud5BneCzvruDeBE2mnAvEFrgGHABnt94slgzvI25w0Dlmm6" + "lg85gG42HG1FD11FfrJeKBwnL44JL4_" + "0123456789acdefghjklmnoqstuvxyABCDEFGL", + "0123455606173889a5b3cd4efghi0jk1lgm0enn85oe30npqlrstjt1j01404edidun8" + "sdostvworwxr5yzqst6s56Abf0B4065C9DbEb36Cm0g22pwFx55y6G46ohB4ropq2p8H" + "oh38aI8HwEycf0s9bEAbwcawaIjGA2_" + "012345689abcdefghiklmopqrstwxyABCEFGHI", + "012341526789abcde583fabghiibabjhkllm5lnopim3kqlrstuhvoewcsibas2xyvzy" + "s05s5lcd07zeABvC2mkl2xnqixabiA0DbBlmlEhicsh55luflrFu5sykewkdufqGwHm0" + "IA0DCmabykyvABfaynyvvCHxm0wCasCJ_" + 
"01234578abdefghijkmopqrtuvwxyzABCDEHI", + "0123456789ab9cdedffb6ghgijklimi6naoejpmk9n9iq4rsnrdetq4hgusvfwqx6hhg" + "ijvucjdfdscyzrbgfbAB2ds7BCcaDyypsEdFasq482ca4hsE2dq4GlklyBm6eEEH6hhC" + "ICCJ82eEDAgJABtDFvk7guvuoGgugJGl_" + "0246789acdefghijlmnopqrstuvwxyABDEFGI", + "012345675809a5bc8cdbef5g950hhiji1jkdl67m4n53oppqr88ps9tquv6eiwx4hrl6" + "vy45z55rdtjApqqBrCr8z23DhinEs0e8sa7demaFsas0532vhrr8kGz2ndnk0zz24n8p" + "kGpHfIurr8nk3pur45Jknk1je8K33px4_" + "012345789abcdfghijklmopqrstvxyzBCDEGHI", + "0123456789a3bcd0efghijklm3no9p6qrstu5v8nn7h6whxyzoAqo2cikBuxrCD4EoBd" + "pt6qtsno6jtuFBG9HtIJtuKLiK4AMG7rr0EjNEO5n720enijPMJgqQQuiv7Q7rfH0R20" + "jr9HSuIPITgcU6JV5vvWtuvLHmHtm3HmXm23B2_" + "013456789abdfgilopqstvyzAEGILSVW", + "012345678559a98bc87def3geh6idjklfmnmoeofpfqhge2cr18stouvwxxypyzAfu9j" + "sBCDltDEopk6efFqwpklw11nqGfnl445mE38g9te249Dr14H24AGg95h9utoDE3ga96I" + "IJeuaChK6I23g5GK3aaCCDtH5hqhHqtH63DE_" + "012345789abcdefhjklmnopqrstuwxyzDEGIK", + "0123456718964abcdec5f7g1hi8jikjlgm18n4opmqr29estpgtuhvgmwxdudeevthyc" + "7kmzcsrbA0BCxADf8hE25F31Gf2cvHstzI5tDIA0th1zb4E2318jJH3scss8KGbcK90K" + "C9tuLdMCNnnMOLA3GjkPhiC9QG8jlkflK9lkikjiHPvHuv_" + "01245678acdefhijnorstuvwBDEFP", + "01234542676589a94b4578bccd5eb5fgghbc1ijiejigk37lmaanopqpr12r255aassf" + "ctubvmmafw8fa8as9wanxnyqpi6z4Au66zanvBzCzemDe83ea88E3qbF6muvan8GvBGh" + "nHI6gh6m2342bcmD0sJHub231gsH3qb50s50b5_" + "0123456789abcdefghijlmnpqrsuvwxyzBCDFG", + "01234556789abcdefghi4jk7l9mnopq8nristluvvwj2w9rvw0xyadu5nzABC4sDEFrG" + "HID7rEtJpwt2789KcxKcCuLzvM9aw9L4MNmCOfCo6OPK4jeyKHNAuvvMlPQNpRplxyN1" + "MNvMnuPKOiseuplPCul39a23j2iSSx2Tfsfgad4jL44jjR_" + "134578abcdeghjlmnoruvwxyzAFINQ", + "01234567689a1bcde2faag3fh1ai3a8cc3231dfjk43f0cc3el1mn939oi9oc323e2dp" + "23el7en9qlqr0cen7elsltu73f01hv4w4578x60dqe7eqe676ww0h1016ww0dpfp3f23" + "72y7yuely67ek6677e1m6w4welhvlty6yuuqqruqyuy6x6rz_" + "01235678adefhiklmnopqrtuvwxy", + "01234567879abcdbc5efgh0i46j1kl5m15b45mnop99q5r35232s5tosp90u3vqwxynp" + "no5rczfljc1A4BoChy7ky7bcnondC0Cjj11Ad33v5DEvcFhyvrrk1G45w8EuqwjcpH0i" + "4Bj1cz5D1GgE9bHhwBaBDfmkuIADiAklJiEuiAAD_" + "0123456789bcdefgiklmnopqrstuvxzABDEGI", + "01234567689a7a7bac8de1fefghi436jgkglmninio3opapqmrsqmne0tk8f0uvs6vun" + "wxgkyzfbsbdAyB9aC0ChshCes2D25EbgxFAGeyd9lH9aalIglHjCjs0uuHe0iuey0uqr" + "vDb0bgrJqrD568sbEcIggxfg2qKLzF8fgxxFcKEc_" + "012345678abdefghijklmnopqstvxyABCDFHIK", + "0123456789a9bcdefg92hijd3klm5jno3pqrofsh23tquvwxfyxtfgbzviAezwnzBueC" + "AeDvtBEsz6pF4GHuIFwgwxGJ67vikKquLE7M67mnnzzwJk6xxta93phigNkOlaBunzBP" + "c6KBgL92wgzwkKgLa9QpmnMRa40cc6uvLE45MHQSFTSMMqqsMqSM_" + "123569acdeghiklnoprvwyzABDHPT", + "012130456783900a1a1bac7ade37f3eghgi01ae930hj7klf83amgno890km07poqrrs" + "he90lt5lju8fo8lt8ffv21pwxog2de0a5821odo8xoxp58ski0trdekm4oodgydeegde" + "odoix45lxolti0tq0alt5l5f45hjrswhamhjpwpoo8az8fo81apopw_" + "01235689abcefghjlmopqstuvwxy", + "012345678392abcdefghh6ijklmnio23a5hpo8833qrisctu8tv8ion9w7xyzyx2gr6o" + "Am7k1BrC6aArD1EF5Do8d723GxHsgr7IHgy0J4fKLvM69N01y0FfFhmLItkl45luOG3u" + "L9l1cd23MiiPIlklMidQRLfKQScdpvARhppvTAh667KUU55DD1_" + "12345678acdfghjklnopqsuwyzBDEKLN", + "0102344536678791a4b4cbc3defghihjkjk991lmi2lhhiinjnbjkbeobpbqjrrs2t2u" + "ugvkowx9ydkj4694k9eolodlyd15nzvcb44667571591A7lm7BA7C0xC8A38DprEc3C0" + "oFoivcGHqEGrIflopq6IqEqIIf02inDpJ6GHjnyvnffgnf_" + "012345678abcdefghijkmnopqruvwxyzACDEHI", + "0123456789abcde094fghijk5lmlnj45igop7qrsbrb26tsuvw67xywbszqABvuCprDq" + "opE8oF5A5yyGHIfClhgJKkLgcLMfvo1IhL1NBvAhFAnl4F7qkimN3zhi5lI3x25yFOPK" + "Nc1N2snlsuvwQPBeEB014x3zoRBetmx2AhSvy3qKLTUsfgFUvwjhDF_" + 
"0135689acfhjklmoprstuwxzBCGHJLP", + "012304567018918abcdefghbdijiklbcm5n2cooi0hpnqrstnuvw9n3dxcry79zAsBCz" + "AfD9h8uEFqqG4sHIJHwFKwpAcorLwFzKKxMebc23DppNcqdeFqNOOg8Bh8Ej70IkPQ5L" + "8cPRIFcoCJRr04jSrLFRTKJh70CJi6DpKfjiEjxF4Ui6Fq18OE8BqGFq_" + "02345678bcdeghijlprtuwxyABCGIKQT", + "01231456789a3b1cde3fbghcg9i1ja4k141llmngg90ho0idilp21ch20hhckqfk4fkr" + "sht0kq0hcffqcf6rrj27h2de233bd51c01s2p2u7l4v04f270h8n1luwh2l51lfx5y5m" + "01xgq9t0c327kqmk6rkqq91cez4fw8tvpui1e5spossp1cc3pu7nm6hcc4hc_" + "0123568abcefghijklnopqrstuvw", + "012345567894abcdefg6fhijklmgnopkqjenr94sotlu3v1nwxyzf2AynootxBvzgxa4" + "5Cef0eDzinefEovF9GnHEIjJijiEJmmgG50Ktwf2LEtDMtHGzB56dNg6KrefepOaJmPL" + "IA83Q0NRLOj8RS0KxTFSQPijUe3vPipHvFVvijgxHbpkWI3vfkqffkpHpk_" + "01234567acejklmoqrstuvwxzCEFIJQ", + "0123456789abc4d8e3fgghij4k9lmnn0op8q50rstesunvwq7eqxyoz25AuB7CrDkafg" + "E2FcGzxH5Dgd5AbEd845exIo4kJd8qoKA1mLngmffJ7eMmyrs63H1wNnrDcNNnA1d8C3" + "BeOAn0PCi9rvAQ0hDQQtR7vdrvtq89diQt676BijB2mfSRD6R7h86BB9C3_" + "12345689abdfhjknpqrstuwxzAHINOR", + "0120345647189abc019d7cefgebhijklcmd2ngjo1pqnp5rls0eftuagpvw3rxkygzA9" + "nBebCoklDvBiEDkFGHlI1pxengh5JwKfLeMNrghtiOgigePMfcLPODrlKfasng85wLLe" + "klQtnFR83KlISQKfI720lIcTrxfc2UHoKfLKTuotkl85jhFGijuVsis0ij_" + "012346789cdfghiklqsuvwxyBDFGKLNO", + "012345637896abcdefg4h6ai7jekkc0lm5cnopnq1rstu0p4ekv7fo1wrxb2ya01zgab" + "Au8y78BlC7DAopjrnEFiAC1wEFlmpmGnnEyaBdHIC0JCvHfBCBuzKcGLM6BlNL8OsJmx" + "JvcdxMGnmxvHJCC0lmolb2kBlPHIPQfo0lopfoJvRvRStktKzpKGGLKGtKtk_" + "012345789abcdefghilnpstuvwCEIJ", + "0123145167894a2bc5544dae3fghijklfkfmnopq2hpo0rgsqtu0s40fnjvwqxyitz1i" + "q9A9mj9t7B7vv81CyD4Cfm5yu54wojjE6siF5ssDGEhHoq23tzlGBAq9DqsDsvu1waEz" + "noGE01yi145shyyi01iFFG513fIF5sezmGfmaeez0f01nmml6snmno_" + "0123456789abcdeghijklmnprsuvwxzABCEFG", + "01234516789abcdefge7h150i58fjk6l2km23n0op3qprs78tuvw7xyhzAc4Bn7CDjvq" + "9EAmjkaCszzAF8CDGx50redy2kh1EHIr01sJKELt4LaH5zaCKtEHmpeJMvLtis0NNOHm" + "PBeGtAQjOgRaS0SJTsEuJ8aHm2g3yGfgaCUPAf8VeGCDbIxDOgNOS0WRCDtA_" + "02346789bdgijkmnqruvxyACEHJKNST", + "01234564787229abc3defgh6ijkl3mnopqrd0k0stuv4ovwxyf3ozy4AcBwCDnEisFqA" + "GpC517BjH5Itj6iJejjpeagHGKBolLDtzM9mN245jvihFOowFDIePbBoDQMh89lEKx9R" + "ej0kEBhSklCT78JS89IemwjpUx0kBoP66qnoNORVwCP6VHow9mWJwxJSxTTX_" + "0123456789abeghjkmopqsuxACDEFGLO", + "012134456768592abc7cd7ef0ghijiaj202a4kl4mnonpqm0rp1psad2t26du62atlvp" + "wtt28sx5yoiqtl217a1hszzABs01nrCilDl0E201onB26B1FjF1p7G2xBsx5B2HlIxlD" + "21fj1pjFrpFi5JzKnrpq1r1h5F67KhzKxF451h1rgru6DggruEEIuEu6_" + "0123456789abcdfghijkmnoqrstuvyzACDFGI", + "01233456677389ab1cd2ed1afghaij1fbk3823dlcmno0pq2rskstuabhn5qvwpfa6ab" + "d2h501pedx6y0hndezbkAvBAq2pesClDEFGAyGHjgIxJ4v2Ka6grmrCL2BHCpnIM6ysC" + "bN1aa6uFrspfrtFOMPuQQwPvvRoIcgsCIsLwwSgILwIMNiCLcgMPPvfgbNgrfg_" + "013456789abcefgikmnqrstwxyABEGN", + "0102345667894abcdefghijkclla7m2hjcnok4bpnqnkr38st2uhdvwbxsyxzA494aBC" + "fDEd2xFr6fxG67vtDntyFHfIBIJKfg9LEBJCo4mtclwHbceMyxgNk4rk2hzldeFriL01" + "CrrzfglOfI1P01IjOQeMCRSiMytyqhCrKRFHFrTqqh56KUUVFH5WWJJCk8Cr8s_" + "01234579abdfijlmopqtvyACDFGHIKMR", + "0123342567829abcde1fgaeh7ii03jkljmn1oflnpqnddearp667s8blln4tm9egunvw" + "pqndx9yzABC3wDqbomEFFGCHwovwIvA6Jn9KLedfiMDxuwx9DxomGNNajewDvwOApq8u" + "klPC3jQvORBQRSPcc5qkv05duTkJjmOpc5gzbclnzrgzSPmgPCskcUAsuw8u822382_" + "012346789acefhikmnoqrsvyBCEHIK", + "0123456789abcdefghijk8lmnompqrjkslhtg4uv45wx5iyk45mqz1A0qBiC90DEFG2e" + "FwijtqiCd9ghHnIdjkfyht78pn673fB7l33Hqr23cwcJncfyHnJKcwyLl3g4MgyFrNDy" + "78OjrIPNrNmHeDQk6FH62e2a6FFGbQtqmqbBmHhampmHvAReaOqrs2BIRS5OR5ykqB_" + "135689bdeghijkloprsvxyzABCEGLP", + 
"012345671589a2bcdeef8dghbijklmnopqi9crst0gn0uvw4xy3zqABCt6fDdeAeEbrw" + "F8cGeH7gcrCkDIvJw4irxcqAKspqKx23Ex0LaKgD5IKxMFwH1DJgN0u6aO0g67mdyNGP" + "7ffQ01lqPfA7syKs452saKcdlmu6KsFm23RMmASRKxxc3zq6A7u6Kx3u7gghLhu6LBBC" + "_01345789bcefhikmnprtuvyzBDHILM", + "012345677898abcded6e26f2fgghhiij8jkl3aimno7pqrcr7cstg784h8uhek3vn3ws" + "jxayj0hzABC9zwec3vap69ayd09Dklj0ek6erE69aqoaaqqrFBcrrEEtGtdEbGApwEBH" + "C9ijChhzpwyIwjCJBbhKChecj0bG67gAuggAhzzH7p233aay67Le3acrrGcr23_" + "012346789abcdefghijlopqrstuvyzBCDEFGJL", + "0123345467389ab9cdefg11ahai59jfg9aik4lmn2op0qr7mos1aa8o8tctuhaavtwnx" + "xyz6jxmjcioAB4kCpwyDhEFBjva8G3wF387Hxl8qIejxbmp0tcHJxlb909KLo8JMHj54" + "pw23N1OKePcdP0ghha1oNQwGgKvyc2pwyR02PgzppwJvvSLTjvPU9jU9936UG3GBUhTV" + "Vr_0123456789abdeghjlmnrstuvwBDIKOS", + "0123456789abcde2e1dfghdhfaijekl8mnoe9pmq0dr0s2tuvkowrxe2iyzABC7z7DDE" + "u0dB01FiyG1pHtIe5pJDucBm851Bu4KcucKJ1pzAu06JL7Kc05iye1I0jLLsMKANDh5f" + "9O3CPeKcs2452ExQdfEmFiRSe205e1FRl4SIyP1pIeekk3SIPssz3NPsNqRSSu3NyPu4" + "qTSu_0234568bcefghijlmopqrsuwACDEFGR", + "012345674889397a6bcde467fegbhie4jkl2mhnhnjjioeigpqrf2s2tuvfb8wvxy623" + "zyeAkBhryfmCDEF5GnAtrfHfvlHIl2gbuvaJeA7ajk3K5wLqaJCHgMrgtNOunjbA23PO" + "rf2tb0IEF5fe0te445e4PCQFM07AfeR1fbwpbAEFnOpqOyyoRsscGPOyHIcdPzzSSIPz" + "gbrggb_012356789abdfghikmnopsuzBFJKMQ", + "01234567895a6bcdec1f7g2hbijk676blmnopeqrs8rj12tssuivbiw53r1xwxywytsz" + "3ABA5Bo8u9w5CyDEg1aFyGfzvoHh5aIf42aJKLywMltsLDJNOL7P67PQNk8MRuIKkdfz" + "QHBAGRASTUiIxzBAinQOVnhWCyin896CWETXHBw512vTiIx3txtRCyBJ45P4XM45zMxz" + "wxzM_01246789abdfghikmoqsuvwxyABEGHLU", + "0123450456789a67bcdee9fg7hi7gjklmn5o4pjmcqdrsjtdkdjiiulonv3wvwuvxuyx" + "yqznA8Bf78o22uo2lolCfkf44lg5sybpCD6E67tCtdCDfg1clFawGAE9H16E4pf4drgj" + "E9td63jIgz78ktBggjbpp6fgfkjIA8784pp66373bccqqAfgcqgzhw78bc8hA8GAhwA8" + "_012345689abcdfhijklmnpqrstuvwxyABCDGI", + "012031456789ab1cdefghijk7lmnodg8lpaopq84rs4t420l01umvwoxtyz55ABCxDfg" + "ozAEnFwjGBHrI0JKfaLHz5fg016AMhsF1c8vg8Nu2OyGvw9tdIi2pPQzEuRNgQiIDRmn" + "z6AEi289oxGBozt32OI7lSKSTjGcwjBmEBzIOUUHv2TySPAEI0PV20BmVF0lvw6ASPKS" + "20hvwj3Et3_01256789acefikmnoqswxzCDFIJOQT", + "012034567849abc4defccg0gh09ijka94aklmn9iho7fpnqrsktp1q5uvcfcwf6x8hke" + "y6wyzoy5oAy2fhgnho34k6c4BCtDE04gy3Fyy334fhxG4gs50HgnuI5u20f2j2Anlx0p" + "01E0EJxGCxKj8EjJKjqrxqgn4g6a4a4gabLq1qEJ6aJlEJ2156gnAn1q26f2fh8hfhf2" + "26_01236789bcdefghijlmnoprtuvwxyzABEGHIKL", + "0123456789abcd8ef8ghh0ij5kgh01hljmnhopnqrsteulvofvwxyozABveCjtq4lB45" + "5khljm1601nhi50DEnbFq0Ahq4tecu67obcGpHIjvoBvlBJw7KopvozcdJAdDvepLD9b" + "va0DdJKH3isAyMnqJwHNDvBOeCAP7KPIvah0APxaDytMjtvanhlDij01lB1yzAtQAhjt" + "Adh0011667_125789adegjklmnpqruxyzBCGHIKMP", + "01234526789a85bcdefbg4hibcjkl0mn5op2qrstukvwxqixoyzABfCAdBxqDmkE1FGx" + "fbHIsJKltL3MNg236qi1OP8z45MrcQbi5ost3MqPMui11OxOuk6qBffhh0011FOR01C6" + "xqkE3M4CS9deTD7UAnI2CSqj4vJOsJUDxqg47U8578nagpaVWTzm2xpGRX85ORe75ooy" + "deC66MmLDtC6_123458acefhjknoprtuwyzBCEFGHLPTU", + "0123456789abcde0f18g0hi3jkl0manfopqerlo6f1stuhvs8wx5boyp3zauA8mazm3z" + "BbwdzxCkwm4DgmjxEvmB674FGtgHgm9HzmIJnIwrKw89jkHauhabDIzxhLmaL7CMx5wr" + "rn9rk5Nh7t6GkOP6MFQy9qL7DRRyhLFQQJ89opHazmFQ4DJvvsSC2Sborn2KnIc4HTA8" + "IJAUqeAKnIA8899q_01245789adeghijkmnopstACFHJMN", + "01233456789abcdefg2hcij06klmndopi5qr1stuv5qwxy7z8AnBi156jwCD7BzCEiDj" + "i5Fq2378ciGHz2bEIpCDj0B9tJKILzo4BKlGJM34accJ0123HsajhN9a6kv5opLODjGH" + "z21sqwLz23KhhlwxnB7BzCN0ChnOndqrhl4GJxnBqwEvvHxfjw0xndKbdeN0DjBKOPPQ" + "DNPDOPbcndKbBKnBBK_01234689abcegkloprsuvwyzACHLM", + "0121342567898ab5cdef7ghij317k4lm89nofpqrst5156uk368vj3w2usejxdjkyqz5" + 
"5ohyoAz5qoefejiqBbsCDEynnFbnBbAEstefxshyiGkHmsIB21qr1o1tI8JbKeiqstwb" + "jkmsacvawaIKwbtAL551kHmHdtaxacL5ejjkkCBb1t5q2125b5vljk8v5olmJbmHoD5o" + "I8IBBJJh5q_01245679abcfhijklmnprtuvwyzABCDFGHIJKL", + "0123456789ab5cdefgh2ijgaklmnopn04qmnrstuvrwpxymzjAphBoCD7rEC2FzGo4uj" + "GHHfAxIk9w1iJqKtdEh2jAeLlJvjdeDoiMNsopK6AxwpBob3zG8DLBrs9fO8CDe0IPph" + "JqeKx3zIQOnI01zG2Fh2eLn0GHHfRNLBECBoR4K6tR67o4CDklGk01K61i01wpklDw8D" + "O8sSFTRNDoQOQz8DQO8H_012345689bdfikmpqrstuwyDEFHIKP", + "012345602678796ab6c00dedaefadghijk7bl0babmno5opnqr9sbteu0vw1a0xq5y1z" + "thncAB9f0dcBCjDkxpq6kzvEDFncnll060n5q6qbGHbaaj3ld1fa9tyF8nIjpJthCjCD" + "Ky79xpv1ed78796aIi8n4KJ4mCmIhiJnterLrmmC6CGDLDLCCjyvAeDE60n5d101w1GD" + "rL606aajLGrLCjKH_1345678abcdefghiknopqrstvwxyzBCDEFHIKL", + "01231456789abca7d9e2fghijgkb9almhno8p078qleddrsfbtuv2ia7fg4klmgw7xyz" + "ysjgomAjBb018C8Dpdd9BA6Eiusf3quy23FGnHjg78hiIhJvvsgK569a14xja7hnv5st" + "CLHMpd78jNnHnuiOuvhn1H14vsv5OFxj566EPAnH6g9OFGGDphOvQpIhfgvshnaP0rFG" + "OvnuJPOFG66gRarJrSiOBbJPd97Gx6PxxC0rPx_" + "01346789cdefgijklmnotuwzCDEGIK", + "012345607896a9b99c9ddecfgh9cfiijkflkhmnkmopq1qrgsitruvwxxpvwiywzgh01" + "hAhmBCCDEDs5qFrg9dwxCughncG0BgCH1qIJpqxpKdLMMN2udOc0PQgM8fMRoPfpBgf1" + "CuLMuvSoM7PixQTxun1qncrJcO3KTONcoslUVsc0cfrLb9LWul2uDSOXLMoP8YZmN8Lb" + "CDxQlUV4PQY11qzQqFoPQjjFrLICQjLbHNmVb9M7EDh7hmEhED_" + "012346789abcdefghijlmnopqrsuwxyzBCDEFGHIKLMPQSUWYZ", + "012304567898a48bc5dc6e3f5662c5cg56hgi101g0jgj7kl8m1n1llopjq404qkmrst" + "g0u5c5cgjgj7788mvwxt2n233vphmye3zes9yAb3B1pC4l98DxsEbFvGEz8bHIAJKLKM" + "x504g0HN8mOe6eLPvwEHwQby5BsEyRB2EHMDu7ObPxx5u5u7DccgzIowScTpt6g0tzFA" + "LSPsKT7iNFB1NUTpFAyAby01ikyRnozIpC04CVpC2n62u9Tp7iSuu99OScno_" + "0123456789bcdefghijklmnopqrstuvwxyzABFHIJKMNQRT", + "0123456789a9bacdefgh3fijklm1no9p205qqrs601t5ju7kvwprx2eyhaznv25jAe26" + "a9dq6BCzjDyEFA4ixA45gGxsa5HFFIdJlKw4kl8dvwyklKKonoL8wt4MvwFwNFh4O7v2" + "Oy9qfkdJtdtfBPQwuKefMCqu26wt9qCD4M89RKDSrSbaghFAuKij6Bw4iTvwSoNx4MUN" + "NxAOwtharSx2ghbt266BFAItbaTz7TtfbtTzCzijbaa9aVO7sOWVBPMCWMMC_" + "0123456789abcdefghikmnopqrstuwxyzBCDEGHIJKLMNOPS", + "0123456789a8ba3b1cde9fg6ehiaba89jkblmlbabll00nno1o81pncqrq7r01stujjv" + "hw9c8981txqxrqgywyem5zABvCehdDksEFGgfHstvsIJqKI8txa8qxcqtxAGbastLkks" + "stwj2MtxqxcqqKHK9cyvIJzNKO01JPJ9uj89gya8Q9GgR8BRHKB4Lk9fDGAB89gyd345" + "6STAR6CEB4vCABg0a0NUVgjvlpa8wj89CE9fmlBIGRfHDGB4TMemmlTAjvbllpMb4QQz" + "TMHK_0123456789abcdefghijklmnopqrstuxyzABCFHJKLNOPQRT", + "0123456789abcbdeef75gdhggiijdjkcl21cm4n667o7fpqr5s450kt0th75klu3vt01" + "r2cu1w1cpxpyys5s34izl2lbb3ABCBDCwnuEBy3pmFGFeGef2fHzboglb33pIJK8vLfF" + "MINkLOOPIQ1wkll22ffpL0IrQDm4RaEpSRklDA8e0kklMKRaL0K8krabt001boTNo7t0" + "uE1wnoJ9cu0kwnkraUVc75OVNVTN45efr2VQ9fIrd2W5J9fFm4gilzAWQDDAAWoWXYo7" + "YoXYo7_0123456789abcdefghijmnopqstuvwxyABCDEFGIJMOPQRTVY", + "01234526076589abcdbefg7dhijfklgmnjonopp818dqi2gr9g26stuvwxdf6a3yjfab" + "zAvBl6j9yxbmmCeCkln3bDyb3a2326l4ElFhinqutkbDaGi2rCHIDJGvFIkKLw7jAqqM" + "l6NFnj6aaMklinl6FOHPQyPREl26S7TUMmnQ9VNFgmhEi2VraG3a2dQw89nQn33aaGsh" + "onElHPinhishtkGDl45G9g45qMyg7dstAq89AUtk5GK4DBkz4UNsDJqMstkKfMtkdfst" + "kKKTfggrrCfg_0123456789abdefghijklmnoprstuwxzABCDGIJKMNOPQSV", + "012345678958abc4de5fgchijfklmjno8pqrsqrt1u5r0vqrwqwaxbbi6ny6x8izr0la" + "AwBlgA01iCD4uE1up1fFcGkG7ooHFIJKekLFuMar2MlakyHNOLlPqQHR4qmO5ftC6ny6" + "yxxbabarrttCCuSu9SgDqQCR6hDmG5biTMQKOJBluMRESugDoHekgceBvTKv5f7ofFwa" + "abqriCsqqQ5fzRhiUzzRCRVQdgvTiCbiabwaG5kGREEWgDuEDm9SG5Q05fwq01mOqrDm" + "1ugD01cwecdg_0123456789abcdefhijklmnopqstuvwxyzABCEFGJLMOQRSV", + 
"0123456789a8a2bcdefgehijek5lminmn0oa7o703bplkqrqr323a25a5ssttufuvfkw" + "4x9yyzbzwAs8BnCD5lAEFGEHn029pvIC12gJDpub9yCDKLjMhweh0a7owAoNOPNG29ej" + "zKN8yzij9yAE5a89D5EHMQwADpcRScplTmPUN8qSoNUVQWVKWRpv7oltFOvkekmiOP5s" + "irsO1rRLWROX12MQPUkqOPcKFOsOzKqSScoapv5s5luUoNplFGpvoaltfutP5afggA01" + "70fuuU677001_012345689acdefghijklmnopqrstuvwxyzACEFGHIKLOQRUV", + "01234536151768679abc8d7bef7g8ghcij0k7lgjbcm8gnopqrstiuvuavwxyzdwzABC" + "fDEF3EuqFGH94IrJJK04LEEFMrNOFljxkIqrAqljlPEQjxep9a4RS9hN5hQiPOtSrJLE" + "Lk7llP8iCTvUmyEFkITKPeef6QGPIGVL36WN67S6GPzUavstoX8dNOvuSm9adAmyLE45" + "67WNavPeep15Of7bEFfDxpux1F15pDxpvucBno5bS667bnnobc5boX45xr1715jxcO6Q" + "ljQlcB17ljjx9Q_0123456789abcdfgijmnopqstuvwyzCDEGHIJKLMNPRSTVWX", + "0123455676089abcd4ef1e1gghhijfkldmmnnh6bocpqrphiistd8uvtm7wxd445mnnh" + "xyuzABhiCDAmhEefrD1elxF0088uuzgz1gG1zo01uHgzfIeJ1eafKwLCG1x71gwxG1Kd" + "013MgLtd088ughgzuHNkd4tdnh0BO5BazrNP45O5uzxu3Jk0MILr47Nk01yHsQ1guzgz" + "1gIsMIcRJCsQCDm7y6mn76EcrSQTjM01UNnh0Bk09j01Ba1eeJah1ebc01cRafk0NkJC" + "3JV9kAAmm7PVAm_0123456789bcefghijklmnoprstuvwxyzBCDFGHIMNOPQRSU", + "01234567869a7b676c0ddeefbfghihjkl4hmnopq3rk3okoststuuvwmhmx9iyzABC7z" + "5D1ECtFCGsH2HpypAyzAziih5FIlDJdDtuj2KdFLMNenuv8KvODJDnnobg1Dts7byPJG" + "tuaMLwlKKcuv7zmvnjDnEJ2qvrPqGs1Dcb86677zhm3rzNfHj23QQRuSenefl41E01Jo" + "fHiH1DMNiyihAThmyPAyUFwmk3okJoDJyPk3bgmVTpFJUF4UJG1DwmqQpq01x0m3011D" + "qQwuLwDJWgJGTpGsNTsS_" + "0123456789bcefghijknopqrstuvwxyzABDEGHKLMOQRTUVW", + "0123455006789a9bcd6e8fegghbibjj2egkglk78jkdmnkolpoqrrpstuclefvpwxy2y" + "2gegiec9toz78fA4mBCA5006w645wl9o9b5wD1EpgFbiA4CutGlkFHI0ix4JK1BLyMNF" + "O56PJ0Qsst8401xyegct4RkNlkSQpww65wwltGE9QsCQeTpwEp9begc9CAGURbL3yM50" + "tG78A4rp3M5wwllkkN8fL3ctLNVWqrkNlkRVwlCAzC5wERrpO5ucuEERRVsppocdVWon" + "nXonucpoCuuccddmcducOI_" + "0123456789abcdefghijklnopqtuvwxyzACDEFGHKMNOQSVW", + "0123456789abcde99cfg2hiajidk7l1mcdfn1neocpoqrnsjqt7ruavs9wiaa0x0xyy1" + "ezABp0Cg5wjisDEFiE5iG6w6eoHCbCmIJFBeyKbfuxB8eoe99wwppLdLjiEb1mzuBMy1" + "NFL15GLr85LOuxbfoqoccppL9cfgPIrQmILrLORSv45iAvdLB8cxQRkONHfnATqdqU6b" + "nVoqUWOmeo5GlQkO1nv4HlNHdkGEdy67lQQRcd2G895Gv8vswpUyv885425w9wB85Gv4" + "2GG6dyAv9c2hv4w6wpcdcpp7_" + "012456789abcdefghijkmnpqrstvwxyzAFGIJKLMOQRSUVW", + "01233456170181489aabaccdefgfhgihcicj7kdl6mn5cdonpqhrstihpuk6vowqb440" + "i0nxyedldyyoozAz3jBCqDjyEFFGenCxrszxhrefHcIJvK177kgLJvMha3vKkNhgJyj0" + "gLr70181tuqO2I7PAzEF1fQrbMBLf534abbMmRST9Hj0gU5Vcj17sttNWgGA1fjyWBVO" + "XTHEEJX7Lp9aAC17BLr71fgUloyef51fHcn5lB5V48xVBLLpFlf5XTCxUkUp8X48rscj" + "rUUp3jlo347k3jACacQrQSQr_" + "0123456789abcdefghijkmnoprstuvxzACDEGHIKLMNOQSUV", + "012345671889844abcdef6egg1ahijjkka9l7mcn89o9181bbcc50p0qrps1tuvwxbxy" + "zsAtB6nC9aa7676DlDcnEFGiwHIJaIfKhLesp884p80p011bbcMuHMFNhmxbuh96tlMu" + "HtqOPvrQ0ppRgiGxSynC0p0qy33TeGqogw1Axyo9rpvwyU7mgisSTVvq9aBTpoiFoWah" + "o9qOhLrpy33Xxy4aFc96NnB6aInIdP96TVzr2Bc5S2sSs1IJ1brQ1ALY5VAtnINnc501" + "hLGxT71bA9lh96deB601hL_" + "0123456789abcdefghijklmnopqrsuvxyBCDEFGHJLMNOPQTVXY", + "0102345673689773a7abcbdcdeeffgg4h0ijkljmnopqree0rst94juavrwxyzA61ngB" + "Cu5tD5EFqGomabb3fHcb5tIEtJbKdcEqu8b3AL87MKqA73NlAucbEDnors97t9sOP801" + "QRQ0011nSD87rh97ucTQlTlPomu8OH24KUhV9xNC69CuAuQ002ALG6sOyL244jh0Labg" + "Q0vdb3bgcfgBsVxWHBBXWi9xKiabdcyLpqL9gBbgyLxWgBab9xR3Fy3iKiR3QRyLvdLw" + "87fHD5zwt95tt9a79xwxxWD5SD_" + "012345789abcdefghijlmnoprstvwxyzBDFIJKLNQRSTUVWX", + "012345657218891a18282bbcde7ffdegch71ijklmdind4op45p2qrsotn2bbc8c8u3u" + "o7b4vw5wx59hy11871oizAsBvCfdDEuE3uF3FGnHinuI3xifAJhCwKfbLxBM7fjb8ufd" + "MNevvwO7Iwke45rgxIjLmkLxx5tmF3OP45Q3IRSTSqUOmk4vVz9h0SWicIacH4NkNXNa" + 
"evT9ijWizAOP9hyYMNVshRjHzopjFGacAJ3xByApRKpjcIERxIyYJFRKjHTDsBuIST8u" + "ByyNYqH418ZSFGYXYqXlSTByTDsBDE4v_" + "0123456789abcdefghilmnoprstuvwxyACDEFGIJKLMOPRTVWXY", + "01023456789abcdefbghijkj7h7lmn8o78p8q0q778rs8iimomdodftfpt1u3dmc2rv0" + "wxyzxABzCypdp8DEFGHuIgJKjLMKKpDlcN01gh02sO8Pbc8iOQQRliq7JKASvBDEqDC3" + "vT08BzvB2rK3UVfbUDv0uaMqr61oD2mc9WUVmOim2i2uus6Lxsijwr49T2r6i6cGnGmn" + "jcwr08lkkjp8MKWFIp2usOjc8ieXD2YLUDi6MIqDcN6Lob9aaS9a1o49ONCBCyCBB1q7" + "1ooW8o788oobsOK3eWq7oW8oZqq7788obFoW_" + "0123456789abcefghijklmnopqrstvwxyzAEFGJKLMNOQRTVXY", + "01231456789a3bcdbefgf33hich0hj8ikl9kbjgh0mh0hjmnopmog9788qrqs7ntnu4u" + "dvcdic8i7ffgghhjjmmw0mx1mw01gyzpgh0lABwp6CDzCE2FFGaHx1GEsIFbJKqLMKN5" + "NfoJalLOykPMcx148gghtQHzAalR0m8qbB0114bjjHHoLk4Miy78GexSkloJeTGev4JK" + "SR4uuUnJFGFCTw6AKV9ag99aLk4uMKFbwtRMhjOWBHAalR8gghh0y0iyv47falCBOS6A" + "lRdvcdBHoJWPHo0mPMHzoJMKKVmwMKPMxSWPyx0my0yx_" + "0123456789abcdefghijklmnopqrstuvwxBCDEGJKMOPRVW", + "01234567389a9bcdae1f4c7gghijkllm6k3nop7o7gpqpcc959h5rnrsqrqtuh1jvwvx" + "9ay6ozABrsraaC5DyEEFBcG1BGHI7wJKdfcd8L6IM3oBANFusOEvOPkApqfe3nrnPKfQ" + "EFFggpop7o67y6pqLPwBRSqtMTRUhqIwN0G1zMh559CJ6k8Qy6VTrsqrW5klw4MTlN45" + "bC59dfzGnLyEfQ9avw7oSDDbzMaeN0pqbCFuSDraz2sOuR2rz2rsozFuCJeJklopEvBG" + "vweXwBcd4chUcdpqqrFxeJoB7ohqqrgh7gghrn3nM33n_" + "0123456789abdefghijklnopqrstuvwxyABCEFHIKOPQRSTV", + "0123456789ab1cdeead6f6gfhg8g9i9dd6dejklmn4elolopamqarstuie9i9dv8elwx" + "v0iyolo5yzAo01Bvv0CrCDEAFGiycy1c0109mk2v5j9d45HIsgieJcKnGLGMgdELMwxb" + "o5hNwz5OsPQCbtLo3sH05jn4sERtjk1cA4RSyzQBcTEArsmkhgamkuGLdeOkU7btrV1c" + "NWzbel7OgdyzIEOkTzU7kubtGMRtmtVNlmmttuIGCr3IQCzbFGJXIEAosEbtrstuCrzb" + "6ld6RtQC6lELolAoLqqaLqTz9d099dELsEEAAooppRop_" + "012356789abcdefghijklmopqrstuvxyzABCEFGHJKMQRSUV", + "0123456789a5bccd9efghijh5j52kl3lgmnopfqrstubmq4vfg5ghq9gw9hxy0zAABeC" + "fDfg4f9giBiE8f9edFaGbcH601Irv7GJjhEs4vzyJKBLM8gzbN7Ov7v22z3ihiGjdGPp" + "caxEcdiQMw89bcN4dR4fpS9ea9aGGJeCiBzA23CqBLTKNaB1UHvDl0VUBLP8uVKxSDqB" + "DzB1zycdCq7keCVP9gFewFxELtPpmAmqrLRnP8TKLtSWIrSDqrvDMwN4oIv2HvmqIr23" + "rLemFn4v3iiEGeELrLM867EsIroIaGMw7kwFGJnoMw3i3l3ikl_" + "12345678abcdefghijklmnoprstuvwxyABCDEFIKLMORTUW", + "011230456783899abc9dcef4g85h454d010hhij0ik301i83l101m9300hno7pqrrsks" + "ikhihtut7kv0wq3dxyzfiAB8CvellDE3Bm3dduobqrFG5hhiDibcf9awHwwq89vl3dlI" + "6tGxmJKCf4LFMNOyzmPaeQFrjMRBC3nS3delnocv9dv5celDFGJTdhSz9a565D6Faw83" + "el30eQobFrJaDUEjutI2zVbC45IUVWQNnS0183no3dQInRI2f4SzE33d5DgEogau4512" + "dukOC4VPUppx0145PXX6SBtk89gEzVUpPX9aEj67zfkOcj7pfPdhpO5h7p5DdhDU675D" + "_0123456789abcdefghijklmnopqrstuwxyzABCGHINPQRTUW", + "0123456789a8b51cdef6ghi2j67klmef4n9o31a4pl5pp6qrst5f3euvwvxw20sy6zzA" + "zBkC0Dmk0E4FG7HpIu5pJiwzAKxwLMxuiNplOAp6PKyQrRrzwzMmvAN3xwExbeJsHpST" + "CThC8HRhgkdEolBKUazB45l7a44FcuFqLVrRlmBCwzxwybPSWGbeQHGRex6zHoHppl4F" + "gh1xzBwzcXp6Jia4tdDI6z7BbeuvxuyQ67WGBKyb010DXOi2be20DcqrQHl7exlmi29W" + "OPdE0DnWxu4nuvvAHooM20cXHoPSQHl7zAhSnWi24FrzqrJi1qi2201c31N3UNN33F1c" + "31010D_0123456789bcdefghijklmortuvwxyzADEFIJKLMOPRSTUWX", + "012345467890a3bc1dde62fghijk23l9622mmnondo5d0590945p4546622mmqqrnron" + "s6tuobv7w0wjj1f878xrywfbez05A2nrBCondoDEFp8GHhmqFv6p62tB1dsA1IdeJoCK" + "LlkMNz8DhsjkzcMOpo5pu6LHvPQRsAv7AvrE01f8Nz1I45pmcSIN055pBApmT4iUvPwi" + "LyHtMfLHywVO45mqOgPqVOqrlTezFpgQrEmn1IRWiU2mKPK3015dzcezde5d3qfggc5p" + "UJmnPxAv6ppou6Ht05JfGRxGobGR7nQRBCkVTuv7PxtB45vPRWUJQRgQAvbcBCobpoHt" + "jUHTfgf8wjFp_0123456789acdefghjklmnopqrstuvwxyzABDEHKLNPRTUVW", + }, + + full_with_errors{ + "01231245016274787445", + "012345605660780112233923", + "012345624785184559851801", + 
"0123456789a8b7cd14c2deef", + "0120324567859a294bcdeadecd", + "012341562775389a3b56b7ab75", + "0123451267849823508445845001", + "012345678598a998ba8508345c40", + "01203134015617206892200131345617", + "0123344556789abc4a9d02ebf47g277g", + "01234526756869844ab0ac450168844a", + "01234356789aa006b6bcd8d1efgfc1hf9c", + "0123456727386790ab5c039dce2390ef3gc9h1", + "0123456771508219017a6bc756dc45ecc7561f", + "0123345627389a07bacdea3f1g7ach2i072329g9", + "012345627895a6bc62d7ef9g7e8gd515b1bhhibcbh", + "012345674898a2191bcd5efghbg8i01b010f181b6b", + "0121233445260617016869ab0c68deef383g34012g1a", + "011234536789784a349bc667c67889539b78dc6725c667", + "01230456677896a9717b010c9d67ced06f0gdh0c0i565jce", + "012314505675566389a5b6cb6d2e1f2102g2501f1hb6b502216i1hcba52e", + "0123456789abcd1e0a9fgh3e23ijk7hj0295k44i8l0mndoiopbmpfobqeefqe", + "0123421565789115654a6b63ac206d3ef9gh42i4cj633ee020424aaccj07ka8f9101" + "4220424a", + "012324567879149ab7c4de805f7c6g5d5h8ijklmb55lln20op5o24pe56lm8ebqpeop" + "8rb479638g78", + "01234526076879aba2cdeb2fab0gfhijkj0la15m0g0d3n15o05pfd1q4f1n0l151q5m" + "2m262pc92nng4rs9c9", + "0123456789822ababccdd4efg82hdi9j2aaceck5l04m9445cnbo0pl02q2a2qbcba2r" + "cdbodi0s2aall0j09j89d1acd44md12rcn0s", + "0112134567589a6b2c37a0d98e9a6401fg120h67a7a0d945649a45id67d99aa05801" + "a0ji45642c376737idk19ajid9ej133767k19aa0640h4558", + }, + + partial_with_errors{ + "01234125_03", + "01230454_125", + "01234051_012", + "01234516_025", + "0112013450_023", + "0112344526_045", + "0123241567_037", + "012034155601_024", + "0120342567_02456", + "012334451623_0236", + "012345166789_5083", + "011230450601_12345", + "01123245657387_014", + "01203456728768_135", + "01230430456758_126", + "01231245640170_350", + "01231453678445_027", + "01234115567478_024", + "01234156473889_025", + "01234251678472_036", + "01123405010526_0135", + "01123456756056_1236", + "01230425366782_4025", + "01234056763843_1358", + "01234205067258_1246", + "0123451657896a_3904", + "01234567388978_1256", + "01203456728720_01235", + "0120345627486449_1359", + "0123403501160167_0245", + "01234156370889a5_1246", + "01234250657889a4_7163", + "01234506727894a0_0356", + "0112304536300112_01234", + "0120345657530118_12367", + "01234250676017_1234567", + "0123456753282001_12478", + "011230456787925aa2_0346", + "012334056789a75bbc_1269", + "011234506501501201_12456", + "011234563725785669_03467", + "012340510160678798a9_135", + "012345145637230182_01247", + "012334562370528796a9_1245", + "012340563738239223_0123468", + "01234056078498a9408498_1367", + "01234156622768969a7b4c_0258", + "01234235670482592335_0124568", + "01123456017809a5bc7d_023567cd", + "01234561748695a070b7_1256789b", + "01234567538968a52353_0124579a", + "0123300114565723825957a9_12367", + "0123456708590a06b7081c_012478b", + "01233425462738691a342338_01345689", + "012345465745808557469646_01345678", + "012345678497a6bc62672dc0_013579cd", + "012304022561782927ab1773c9_134568b", + "01231405617849a79aa749140178_01268", + "012342352356678056893580011a_12469", + "01123456473468560180930112_12356789", + "012324561785799801a0798501_01234567", + "01234516174645581645947994_01235789", + "011223454678298a78b2452c97_b70893265", + "01234256758914a5177517b65614_0235679", + "0123454267389a7bbcd0e6677f87_01256ae", + "0123451678961601a0026496b878_0125789a", + "0102234552267869a54b522cc669_1234679bc", + "0120134567740145389a137b73bc7b_024579a", + "0123456789abc67a8d38ded494f9gf_12479be", + "0123456783791ab7acdb3ae3f4_0134678abcdf", + "011213456789a468b6cbdefgehhi7jgk_12569df", + 
"011231405673839ab701a6c7d773_012356789bd", + "012334356748949680a9bc964d8efe_1235679ab", + "0123410156570898ab5ccb697d575c_01235789bd", + "01234560789ab9cd96bed9fgeh9iifhjfg_1357acg", + "0123455667144556148945901401144590_01234678", + "0102345675289a7bcdefgd8gghdiijd3fkdi_146adeg", + "0123450678890ab3ac64de4f2g9h06ig74_03567abdfg", + "0123455001657872951a5bc7dec665c6efbggh_13479d", + "011234256217289a122bc9c246de2bfc6cgf_12458abdf", + "01203456726849a38a9ba3cd68e73456dee749_13457ce", + "011234015675289a56126001a7609a12562875_01246789", + "01213456789615a3bcdefafg9h1ijd1klj5dgm_12458cei", + "012345567892a9b0922cd596564ed0fdd4b0_13489abdef", + "012345627896a9bcd41ea00fcg01ghfi0dj164_035679bf", + "012334567819ab9ccd2ce2d5c7199defg9fhi12c_0268bcd", + "01234562785695abc26256231962a6a99519_013456789bc", + "0123456780629abcd8eb8fgeb93hh1fb8f1ibccj_1257acd", + "0123451637589abcd22e625fgbb11662chhib1_1f0ebg4379", + "01203456758087597515a3ba5c4da31e34154fa3_e8ba61930", + "0123214556789ab1c8dae8f0gf6hijbjg7k66hlb_013479chi", + "012345678298a71b4cdaef9gdhhiijkdldhelmm6lm_0356fkm", + "01230445167528955ab9cde05875af8gh002hb8a_0d537g4h29", + "0123245671189840a4b9c8c23d9723c2c88e3d_012345789acd", + "0123425431671859a0010bbcc5c2599dde5953_012345789abc", + "012034567890ab627c56c90d5e7ca7f0200d4gh7ihgj_12358bd", + "0123433567789a2362b61c7d9e62a7f9e59a9g239a_7d23af805", + "0123456478196abcd46481aef69619gf021h01781h_0a35d9h7c", + "012324456782249a81a8b5bc814b50015082a89aa882_012456789bc", + "0123450678969a62bcda96e9f0064gghi0b7jdkeliheedlm_13567ac", + "01234560780795ab0c9adbec3f1gc1711hfij13jkil3ml_12458aegh", + "01234567869a1b34c219bd6ef66g9h2i9fb99ff64jfhha_01247acegh", + "01023456784859a11bcdefegh7igcjkclhcdmj8n7knddond_1368adfjk", + "01234567589ab9cdef4b813g8hi2j4ik23kl8elmneo8napa_1347adfgh", + "0123456478049abc5de9df2ghigj4kblemm5hnl6jee95o6pl6_13457aci", + "012334536741389a1b1c9deff0011bgbbhgbijc92g23_012345789abcefgi", + "012314451678592ab66cdefghi4hj71d841k6lgkk5k6mkgnmoop_0348befil", + "0123454678690aaba23cdefdegchi7dej9ec2ekl69j35i5m4645_1258bdfjl", + "0112234056017812154049407a12012b1256152c2301404d4e_1234689abcde", + "01213435672890a97bcdede4cfghg3hdaijhhckh74l3immnaikopo_0146cefhm", + "0123244556789a8b7c6de85a8fegghi9d1jkljfmnoblmpiqrbosqt_1368abekn", + "012342567890a85bcdefbg1ah7eijklgm9nlo1lgmpkqp6orqsd88q_13678cfjl", + "01234567089a7b01cd8ef984cg1hi0hjk8jl011hkmnghj08o0mcop_13568adik", + "012324567895158a565b23c935d72378e95fg295he205bei_0123456789abcdei", + "01234526789ab4cde8fgghi6d778bc02c0cd3j1kd7hlkm23ano2nppq_035679cfj", + "0123345647892ab8cde05e71fcg5h2ihbe1j7k070l5emfi5ihh2n5nbb8nb_" + "1269adk", + "012340567839abc0d2e4dffgheih7j9kle9cmno95mploqrpm9s5rtsuto_" + "123468ahn", + "012345670869abc8defefghid4cjk0266lk6hm67demnoneplqdea545d4_" + "13578bdfi", + "01234526231789a87b1cd74ef0g6h7gi0j3kiljm177nlmob3pilq33hporn_" + "03569cdfp", + "01234534567584790a47b0b2c723de5f755gc775ch0cc73i755gj9_" + "62abgf951483hec7", + "012345627489abc8d6efcdghi0f58jk5jlhmnmond6pdiqdrrsqrdarbpdpt9b_" + "1359bcegp", + "012340566789abcd7e78f7g6hg4878iccj67dk3l7m3dmn4opqg6nqro5sg5806864kp" + "t7uk7m67t7_12359bdefghjkoqst", + "010234156789a87bcdea2fga4h3ijk1j34lmn3fop1n6q002irsn2kkm1jn61skg34l4" + "4iklti6tl4jkjl4i_0136789bdefhikmpqr", + "0123456789abcdb21ebfg8hfg7iji7khfljbfmnobfj0lcpqojj023ra01sn3t1unacd" + "hmnomlvmlcionrahrkhmoa_13579adefhijkmnqsu", + "01234546404667467801091a407b0167c4d2407b1ebf467b67017b46c41g0h40bijc" + "c44001jk40bl467b6746407b0hbf_1346789abcefghl", + 
"0123425678592ab2accdefg0hgg9ij3kfl2h3chkhgl9bmnopbqpmrh30s0ikia3fn4h" + "rcbmqetrrumaq4arpba3fv4hvhh3vha3hkvll556l5_01346789bdejkmorsu", + "012345677897abcde0bf6g75hi5jk16l7mncopmh5omb4eqrgnl567sl6gstukvwjxy6" + "bzlbwj5wbfrABA7Cs5s4lbslwDrdqrtqtee001te_01345679bcdfghjloqstvyz", + "01023456789a0b8cdefgghihjklmn2i2opbqhrnstutv2tgb5uuwbxeyzd85Ao5uiBr3" + "56rttudehArtCDEp2lr3DFghlw0bwFwp0jc6zc3Ec6AlbA_" + "1246789dfghijmnopqtuwxzC", + "012345672893abc8d2efdg6hi0jkfklmnop74qrjdsctsu2n67vpfwn6bncbvxyw95zy" + "36a2A96kBreyC6Bvtzqvue932unf2nDap7mE367101p761_" + "01256789bcefgjklmopstvwA", + "01234567894abcda3e9f3ga6h5ai8bjhk7l2mkkn0opqrstmeu7v4ak71wx7w4yw4a3z" + "23uveuA23xh5sB57BC7Do22EszAFx7Gn3xnHIHJBIubByo3z23A28b899kmk8b_" + "012345678acdgijklnprzBGJ", + "011203242567879aabcad9efg5hddif67jiklm1bno6b2pbpab9a90q6cfa6rsjnbj87" + "68hdhccfthhdth1burf6c9dief25cfkgc9d98v12c9a125ufqwcfw8urswc9d9uf7xf6" + "a6a1q667qwswsysww8_01235789bcdeghijklmnpqstvwx", + "0123456789ab5c9d9efg5hijkablmnlog6pq0rstuvlw4xo0esyfhoz7e2ABpehCcCk4" + "wD01xlrgdAo0be23CBE9d2ukEF2y3GyHtHAB9e5cd2hoBIpqqJKLLiE9lolw0riwMN9e" + "w00rCNiwLilohCxhaxrOCNO6xhABvarO_1345678abcdeghilmoqtvwABCEKMO", + "0123423516789abcdefghijklm9nofphqrset4ujivwxg24yzbxk23fz6ABChifgDsEc" + "g2fFscGtzdvlHI4259Jh4qKGLvMiHNcOoP0Q3523PDEcexOiDQ59d6Fs42GtjL23gEtg" + "sekn8DxkFs9Rzd59Qw35wSFEgE8FQwkn23DQQwwx_" + "023457abefgikmnoprsvwyzCDEFIKQ", + "01234567897a9bc05defghdifjk7lmnojpfqrkisatukvwmxqy6gy3nhfqbz7arAqyxB" + "By0CtDdEFGhHy35IDJluj2KL2K6bBI86HDr4MNhGsJOHBOwPwu1QEDRcwr6701u6LJhH" + "wuatc0MSO0t1lukBLTRcc0z0URHLRcPfHDOHDQHDDJr4HDwPmU_" + "02345689acdefhijklmorwyEFHJLNPQR", + "012345367898ab1cdebdf4ghijkj0kc7lmm0n001kop04qrs5tquuvnwj9xmcq1y2gpz" + "hAmaBszr7845wibcc7bcCDghyEl20F78EGlm2gH8IJgfghKLLIMBLphAaNAHOE988PIw" + "QRwhhSNdpzz1deEQLnwh1ynweuwhmahSSTsEEQLphSwhxmKLyUdU_" + "01234789befghiklmnpqrsuxzADJKOR", + "01023456789abcdef8dg2chiaejhk4lkmbn9oplqr3slitbuvwaxw4ywzABqCrDltrEF" + "GBlHmx9mhIDvxsDBr3k43J7KLhMEhihoNAOBdIKNmPkNoQQlNRaoaxHNIDlqsSQTT3UH" + "xVWHuUIMXbX2HzitdIMvxQiMDlvkMvr3trrw0iQTT301bQQlPVr3_" + "1345789ceghiklnoqstuwxzFHIJLMRVW", + "0123415617897abcdef9c8ghi2jklmlnmopq6rs1gt0m3nueviwxym23525zno2loAwB" + "iCuDvf86no6E56yFGEsHtu6ysjgt52I43nJK9sJHLiMINOPMEp56gbOJ90u5i2fIvfm7" + "IQuD86JK8R6ElmClRJpqSTUS86lm89qSi9vii9jUc8sjhcvVIsc8_" + "0123456789abdehiklmnoqsxzCDEGKNT", + "01234567689abcdefgghij0k1lmnobhpqrjs2fctcusvwlhxly3z3guwABwl8CCDu2Eu" + "fFtat22fghGmuwyHlyIjw3gJnIKy3gIqLMmnNau2OuOmkBPw5OghNLnIjsIjn3QINQbc" + "CNta2f8CCNfFAl45ctOP1RwlP1tauwqrrxNaOPcutauw5E2fScSTRq_" + "012479abdfghjlmnorstvwxyBDEFLO", + "010234567893abcd172eefgheidjk9lbmha21nbo2epg7kqr53013sk9931tuaniiviw" + "eirexyqra2zpokAzrBt8eCvD02qzEhivyFGwtxniE5Huf9FI01vD1tuaarBmJAGyKfnG" + "rBLMbKf901txN0pEOBCP2nBCP6tG64LICv9Q2of9vL4RCvboAbbooftxxStxf9_" + "0124689abceghilmqsuwxyBDFGLOQ", + "01234567829abcd99aefg4hijk5klmab1nokpll65qrstuv3fpgwpl3x0ybzAhi49ahi" + "Bb6BC816g4ab01DzBbACE2FteFivlmv3GwHIC83c3qFt45Jjm74jdG8KILAhefxMaoNH" + "O0d9fpplGwtdPad9fp9JlmbzNfHIDxyPKQJofAPaAhNHaogwR9KDDxok8KNfh9GJfAAh" + "9JJjfA6Bl8PBuGplfpPapl9a_1234578acdegijkmnpqsuwxzABFILM", + "01234567898aabc9dc0edfag3hijklml3nonjpnenqqro00kestuneivsues2wxyn9zA" + "BCc9o0yDklpEFGCH0kkso0sxdc2oIJbIbKhqneo0Etab9b89c96ihaf1LEkllMtMrz89" + "38Ffv0ijf1qrJNHDmOc11lqP1Bfmf1ivvfQRBKksnq1Bf1bKrKfmGmqPc9BK7vKJvLxy" + "MHhS3nmOIJNTR2233hbKhqR2OMyNBCCyMH3nHD9bqrrxUOVDLk7vQRqrhqksonR77oGU" + "9B7vonJN1BpUIJGUOMMHOM_" + "01234789abcdfghijklmnopqrstuxyzACDFGHIJLMOPQSTU", + } + +{} + +} // namespace tests +} // 
namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp new file mode 100644 index 0000000000..ab28fca9f9 --- /dev/null +++ b/tket/tests/TokenSwapping/Data/FixedSwapSequences.hpp @@ -0,0 +1,90 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <string> +#include <vector> + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Random problems are easy to generate, but good ones are maybe not so easy. + * Another way to generate possibly better problems is to take, first, + * a random problem and solve it with a reasonable TSA. + * We then record the sequence of swaps generated, + * and use the sequence to GENERATE a problem + * (taking the graph to have only edges which arise from mentioned vertex + * swaps and no others, and taking the desired vertex mapping to be + * exactly that which arises from performing the swaps). + * We then solve this new problem with our TSA and compare the number of swaps. + * + * This has the benefit of providing a solution to compare against + * (the original swaps), which is also perhaps quite hard to improve upon, + * because at least one other TSA did not do any better. + * + * Of course it is not a direct comparison of our TSA with others, because + * + * (1): We obtained these swap sequences by removing unused edges in a + * returned solution. This actually changes the problem, so it is possible that + * the returned solution would change if presented with this new problem. + * (Although it would be most elegant mathematically if this did not + * occur, it seems hard to enforce it in an algorithm. There is not much benefit + * in doing so, so it seems unlikely that it would arise "by chance". Even if it + * did, proving that such a property did hold would be hard). + * + * (2): Vertex relabelling also changes the problem, even though it is + * "isomorphic". It seems very unlikely that an algorithm would always return + * isomorphic solutions to isomorphic problems. (Even if it were an + * optimal algorithm, the solutions may not be unique even up to isomorphism). + * + * (3): The TSA may be non-deterministic, due to RNGs. (Our algorithm is + * deterministic, however, since we deliberately set all RNGs to a default seed + * before use). + * + * (4): The returned swaps have already been run through SwapListOptimiser + * passes to reduce them. + */ +struct FixedSwapSequences { + /* Encoding swap sequences as strings rather than inside a vector + * should give smaller C++ and .obj files. + * For convenience, the vertex numbers in each problem should be + * {0,1,2,...,n} with no gaps. Also for convenience, sorted by string length; + * shorter strings are usually "simpler" problems. + * + * "Full" sequences came from problems where every vertex had a token. + * "Partial" sequences came from problems where only some vertices had a token. 
+ * Thus, the vertices which did initially have tokens are also specified; + * for a fair test, this is essential as it may enable reductions + * which would be invalid in the "full" case. + * + * Note that some sequences currently give errors with the best TSA. + * It is due to disconnected architectures, which can cause errors + * (although not always). This is a bug which should be fixed, although + * every architecture we use in practice should be connected. + */ + + std::vector full; + std::vector partial; + std::vector full_with_errors; + std::vector partial_with_errors; + + /** Upon construction, the fixed sequences will all be set. */ + FixedSwapSequences(); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TSAUtils/test_SwapFunctions.cpp b/tket/tests/TokenSwapping/TSAUtils/test_SwapFunctions.cpp new file mode 100644 index 0000000000..f4666f372f --- /dev/null +++ b/tket/tests/TokenSwapping/TSAUtils/test_SwapFunctions.cpp @@ -0,0 +1,89 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "TokenSwapping/SwapFunctions.hpp" + +using Catch::Matchers::Contains; + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Get swaps, with exceptions") { + for (size_t ii = 0; ii < 5; ++ii) { + for (size_t jj = 0; jj < 5; ++jj) { + try { + const auto swap = get_swap(ii, jj); + CHECK(ii != jj); + CHECK(swap.first == std::min(ii, jj)); + CHECK(swap.second == std::max(ii, jj)); + } catch (const std::exception& e) { + CHECK(ii == jj); + CHECK_THAT(std::string(e.what()), Contains("equal vertices")); + } + } + } +} + +SCENARIO("Disjoint swaps") { + std::vector swaps; + for (size_t ii = 0; ii < 5; ++ii) { + for (size_t jj = ii + 1; jj < 5; ++jj) { + swaps.push_back(get_swap(ii, jj)); + } + } + std::stringstream disjoint_pairs; + std::stringstream non_disjoint_pairs; + for (const auto& swap1 : swaps) { + for (const auto& swap2 : swaps) { + auto& ss = disjoint(swap1, swap2) ? 
disjoint_pairs : non_disjoint_pairs; + ss << "[" << swap1.first << swap1.second << " " << swap2.first + << swap2.second << "] "; + } + } + CHECK( + disjoint_pairs.str() == + "[01 23] [01 24] [01 34] [02 13] [02 14] [02 34] [03 12] [03 14] [03 24] " + "[04 " + "12] [04 13] [04 23] [12 03] [12 04] [12 34] [13 02] [13 04] [13 24] [14 " + "02] " + "[14 03] [14 23] [23 01] [23 04] [23 14] [24 01] [24 03] [24 13] [34 01] " + "[34 " + "02] [34 12] "); + CHECK( + non_disjoint_pairs.str() == + "[01 01] [01 02] [01 03] [01 04] [01 12] [01 13] [01 14] [02 01] [02 02] " + "[02 " + "03] [02 04] [02 12] [02 23] [02 24] [03 01] [03 02] [03 03] [03 04] [03 " + "13] " + "[03 23] [03 34] [04 01] [04 02] [04 03] [04 04] [04 14] [04 24] [04 34] " + "[12 " + "01] [12 02] [12 12] [12 13] [12 14] [12 23] [12 24] [13 01] [13 03] [13 " + "12] " + "[13 13] [13 14] [13 23] [13 34] [14 01] [14 04] [14 12] [14 13] [14 14] " + "[14 " + "24] [14 34] [23 02] [23 03] [23 12] [23 13] [23 23] [23 24] [23 34] [24 " + "02] " + "[24 04] [24 12] [24 14] [24 23] [24 24] [24 34] [34 03] [34 04] [34 13] " + "[34 " + "14] [34 23] [34 24] [34 34] "); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp new file mode 100644 index 0000000000..c3f27050b2 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.cpp @@ -0,0 +1,38 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "NeighboursFromEdges.hpp" + +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +NeighboursFromEdges::NeighboursFromEdges() {} + +void NeighboursFromEdges::add_edge(const Swap& edge) { + m_cached_neighbours[edge.first].insert(edge.second); + m_cached_neighbours[edge.second].insert(edge.first); +} + +const std::vector& NeighboursFromEdges::operator()(size_t vertex) { + const auto& neighbours_set = m_cached_neighbours[vertex]; + m_neighbours_storage = {neighbours_set.cbegin(), neighbours_set.cend()}; + return m_neighbours_storage; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp new file mode 100644 index 0000000000..7cedbb075d --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/NeighboursFromEdges.hpp @@ -0,0 +1,63 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "TokenSwapping/NeighboursInterface.hpp" +#include "TokenSwapping/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Simply take a collection of swaps (or edges) and construct the neighbours + * data. */ +class NeighboursFromEdges : public NeighboursInterface { + public: + NeighboursFromEdges(); + + template <class SwapContainer> + explicit NeighboursFromEdges(const SwapContainer& edges); + + /** Add the edges one-by-one if desired. + * @param edge An edge which you know is present in the graph. + */ + void add_edge(const Swap& edge); + + /** The caller must not call this too soon, before "add_edge" calls are + * completed. + * @param vertex A vertex in the graph + * @return All other vertices adjacent to the vertex (stored internally). + */ + virtual const std::vector<size_t>& operator()(size_t vertex) override; + + private: + /** The key is the vertex, the value is the list of neighbours. */ + std::map<size_t, std::set<size_t>> m_cached_neighbours; + + std::vector<size_t> m_neighbours_storage; +}; + +template <class SwapContainer> +NeighboursFromEdges::NeighboursFromEdges(const SwapContainer& edges) { + for (const Swap& edge : edges) { + add_edge(edge); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp new file mode 100644 index 0000000000..c3a9692121 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.cpp @@ -0,0 +1,70 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "PermutationTestUtils.hpp" + +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +std::array<unsigned, 6> PermutationTestUtils::get_end_tokens_for_permutation( + unsigned permutation_hash) { + REQUIRE(permutation_hash >= 2); + std::vector<unsigned> digits; + { + unsigned perm_hash_copy = permutation_hash; + while (perm_hash_copy != 0) { + digits.push_back(perm_hash_copy % 10); + perm_hash_copy /= 10; + } + REQUIRE(!digits.empty()); + REQUIRE(std::is_sorted(digits.cbegin(), digits.cend())); + REQUIRE(digits[0] >= 2); + std::reverse(digits.begin(), digits.end()); + } + unsigned cycle_start_v = 0; + std::array<unsigned, 6> tokens; + // No significance to 9999, just a number>5 which stands out + tokens.fill(9999); + for (unsigned cycle_length : digits) { + // We want to enact the cycle (a,b,c,d). Thus a->b, etc. is the vertex + // mapping. Now "tokens" represents what happens IF the vertex mapping is + // applied to [0,1,2,...]. Thus, whatever was INITIALLY at vertex "a" (the + // number "a" itself) should end up at "b", i.e. tokens[b] == a. 
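+    // For example, permutation_hash 32 gives cycle lengths 3 then 2: the loop +    // below enacts the cycles (0,1,2) and (3,4), giving tokens = {2,0,1,4,3,5}, +    // with vertex 5 left fixed.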
+ for (unsigned ii = 0; ii < cycle_length; ++ii) { + const unsigned source_v = cycle_start_v + ii; + const unsigned target_v = cycle_start_v + ((ii + 1) % cycle_length); + REQUIRE(source_v != target_v); + REQUIRE(source_v <= 5); + REQUIRE(target_v <= 5); + tokens[target_v] = source_v; + } + cycle_start_v += cycle_length; + } + REQUIRE(cycle_start_v <= 6); + for (unsigned ii = cycle_start_v; ii < 6; ++ii) { + tokens[ii] = ii; + } + for (unsigned tok : tokens) { + REQUIRE(tok < 6); + } + return tokens; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp new file mode 100644 index 0000000000..abd4532ce8 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/PermutationTestUtils.hpp @@ -0,0 +1,39 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +// See CanonicalRelabelling.hpp for an explanation of the "permutation hash". + +struct PermutationTestUtils { + /** Given a permutation hash, return the final tokens after performing that + * mapping on the vertices 0,1,2,...,5 in the canonical way. + * @param permutation_hash A decimal number representing a permutation on + * {0,1,...,5}. + * @return The numbers {0,1,2,...,5} giving the final tokens, if we perform + * the permutation, with each start token label equalling the vertex label. + */ + static std::array get_end_tokens_for_permutation( + unsigned permutation_hash); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp new file mode 100644 index 0000000000..67e64f3bcb --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp @@ -0,0 +1,155 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "SwapSequenceReductionTester.hpp" + +#include + +#include "NeighboursFromEdges.hpp" +#include "TokenSwapping/SwapListSegmentOptimiser.hpp" +#include "TokenSwapping/VertexMapResizing.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +static void reduce_sequence( + const vector& swaps, const VertexMapping& vertex_mapping, + NeighboursFromEdges& neighbours, SwapList& raw_swap_list, + SwapListOptimiser& general_optimiser, + const SwapSequenceReductionTester::Options& options) { + REQUIRE(!swaps.empty()); + + VertexMapResizing map_resizing(neighbours); + SwapListTableOptimiser table_optimiser; + SwapListSegmentOptimiser& segment_optimiser = + table_optimiser.get_segment_optimiser(); + raw_swap_list.clear(); + for (const auto& swap : swaps) { + raw_swap_list.push_back(swap); + } + std::set vertices_with_tokens; + for (const auto& entry : vertex_mapping) { + vertices_with_tokens.insert(entry.first); + } + + if (options.optimise_initial_segment_only) { + general_optimiser.optimise_pass_with_frontward_travel(raw_swap_list); + if (!raw_swap_list.empty()) { + table_optimiser.get_segment_optimiser().optimise_segment( + raw_swap_list.front_id().value(), vertices_with_tokens, map_resizing, + raw_swap_list); + } + return; + } + table_optimiser.optimise( + vertices_with_tokens, map_resizing, raw_swap_list, general_optimiser); +} + +static void check_solution( + VertexMapping problem_vertex_mapping, const SwapList& raw_swap_list) { + // Every vertex swap on a source->target mapping converts it to a new + // source->target map, i.e. map[v] = (token currently at v). + // So we BEGIN with every token equalling its target, + // thus at the end every token must equal its vertex. + for (auto id_opt = raw_swap_list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = raw_swap_list.next(id); + const auto& swap = raw_swap_list.at(id); + const VertexSwapResult vswap_result(swap, problem_vertex_mapping); + } + REQUIRE(all_tokens_home(problem_vertex_mapping)); +} + +static size_t get_reduced_swaps_size_with_checks( + const vector& swaps, const VertexMapping& problem_vertex_mapping, + NeighboursFromEdges& neighbours_calculator, + SwapListOptimiser& general_optimiser, + const SwapSequenceReductionTester::Options& options) { + SwapList raw_swap_list; + reduce_sequence( + swaps, problem_vertex_mapping, neighbours_calculator, raw_swap_list, + general_optimiser, options); + check_solution(problem_vertex_mapping, raw_swap_list); + REQUIRE(raw_swap_list.size() <= swaps.size()); + return raw_swap_list.size(); +} + +size_t SwapSequenceReductionTester::get_checked_solution_size( + const DecodedProblemData& problem_data, + const SwapSequenceReductionTester::Options& options) { + NeighboursFromEdges neighbours_calculator(problem_data.swaps); + return get_reduced_swaps_size_with_checks( + problem_data.swaps, problem_data.vertex_mapping, neighbours_calculator, + m_general_optimiser, options); +} + +// Reduces the sequence of swaps, checks it, and returns the size. 
+size_t SwapSequenceReductionTester::get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data, + const SwapSequenceReductionTester::Options& options) { + NeighboursFromEdges neighbours_calculator(architecture_data.edges); + return get_reduced_swaps_size_with_checks( + problem_data.swaps, problem_data.vertex_mapping, neighbours_calculator, + m_general_optimiser, options); +} + +SequenceReductionStats::SequenceReductionStats() + : problems(0), + reduced_problems(0), + total_original_swaps(0), + total_original_swaps_for_reduced_problems(0), + total_reduced_swaps(0) {} + +void SequenceReductionStats::add_solution( + size_t original_swaps, size_t reduced_swaps) { + REQUIRE(reduced_swaps <= original_swaps); + ++problems; + if (reduced_swaps < original_swaps) { + ++reduced_problems; + total_original_swaps_for_reduced_problems += original_swaps; + } + total_reduced_swaps += reduced_swaps; + total_original_swaps += original_swaps; +} + +std::string SequenceReductionStats::str() const { + std::stringstream ss; + const size_t swaps_for_equal_probs = + total_original_swaps - total_original_swaps_for_reduced_problems; + const size_t reduced_swaps_for_reduced_probs = + total_reduced_swaps - swaps_for_equal_probs; + const size_t overall_decrease = total_original_swaps - total_reduced_swaps; + ss << "[" << problems - reduced_problems << " equal probs (" + << swaps_for_equal_probs << "); " << reduced_problems << " reduced probs (" + << reduced_swaps_for_reduced_probs << " vs " + << total_original_swaps_for_reduced_problems << ")]\n[Overall reduction " + << total_reduced_swaps << " vs " << total_original_swaps << ": "; + if (total_original_swaps == 0) { + ss << "0%"; + } else { + ss << (100 * overall_decrease) / total_original_swaps << "%"; + } + ss << "]"; + return ss.str(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp new file mode 100644 index 0000000000..1ba570a2d1 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/SwapSequenceReductionTester.hpp @@ -0,0 +1,69 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "../TestUtils/DecodedProblemData.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "TokenSwapping/SwapListTableOptimiser.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Directly test the results of table reductions on fixed swap sequences. */ +class SwapSequenceReductionTester { + public: + struct Options { + bool optimise_initial_segment_only; + }; + + // Reduces the sequence of swaps, checks it, and returns the size. 
+ size_t get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data, const Options& options); + + size_t get_checked_solution_size( + const DecodedProblemData& problem_data, const Options& options); + + private: + SwapListOptimiser m_general_optimiser; + // SwapList m_raw_swap_list; +}; + +struct SequenceReductionStats { + size_t problems; + size_t reduced_problems; + size_t total_original_swaps; + + // This only includes problems where the number of swaps strictly decreased + // after table reduction. + size_t total_original_swaps_for_reduced_problems; + + // This is the sum of "reduced_swaps" passed in, over all problems (including + // those where there was no decrease). + size_t total_reduced_swaps; + + SequenceReductionStats(); + + void add_solution(size_t original_swaps, size_t reduced_swaps); + + std::string str() const; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp new file mode 100644 index 0000000000..024b09862e --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp @@ -0,0 +1,179 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "PermutationTestUtils.hpp" +#include "TokenSwapping/CanonicalRelabelling.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Every element must represent the SAME mapping, up to an appropriate +// relabelling. +typedef vector> + EquivalentMappings; + +// Everything in the OLD mapping does map to the expected vertex. +static void check_that_old_mapping_is_a_subset_of_expected( + const VertexMapping& mapping, + const CanonicalRelabelling::Result& relabelling, + const std::array& end_tokens) { + for (const auto& orig_source_target_pair : mapping) { + const auto& orig_source_v = orig_source_target_pair.first; + const auto& orig_target_v = orig_source_target_pair.second; + if (relabelling.old_to_new_vertices.count(orig_source_v) == 0) { + // If this old vertex is unmentioned, it must be fixed. + REQUIRE(orig_source_v == orig_target_v); + } else { + const auto new_source_v = + relabelling.old_to_new_vertices.at(orig_source_v); + const auto new_target_v = + relabelling.old_to_new_vertices.at(orig_target_v); + // end_tokens is the target->source mapping (the reverse of the usual). + REQUIRE(end_tokens.at(new_target_v) == new_source_v); + } + } +} + +// Everything in the expected new relabelled mapping agrees with the old +// mapping. 
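+// (New vertices that are fixed by the permutation need not appear in the old +// mapping, but if they do appear there they must be fixed; any vertex moved by +// the permutation must be mentioned explicitly in the old mapping.)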
+static void check_that_nonfixed_new_vertices_are_mentioned_in_old_mapping( + const VertexMapping& mapping, + const CanonicalRelabelling::Result& relabelling, + const std::array& end_tokens) { + for (unsigned new_target_v = 0; new_target_v < end_tokens.size(); + ++new_target_v) { + const auto new_source_v = end_tokens[new_target_v]; + if (new_source_v == new_target_v) { + // Is it mentioned in the old mapping? If so, it must be fixed. + if (new_source_v < relabelling.new_to_old_vertices.size()) { + const auto old_source_v = + relabelling.new_to_old_vertices.at(new_source_v); + if (mapping.count(old_source_v) != 0) { + // It IS mentioned, it MUST be fixed. + REQUIRE(mapping.at(old_source_v) == old_source_v); + } + } + continue; + } + // Different source, target, so the original mapping must mention this + // (otherwise, the mapping would be incomplete). + const auto old_source_v = relabelling.new_to_old_vertices.at(new_source_v); + const auto old_target_v = relabelling.new_to_old_vertices.at(new_target_v); + REQUIRE(mapping.at(old_source_v) == old_target_v); + } +} + +static void check_relabelling(const CanonicalRelabelling::Result& relabelling) { + REQUIRE( + relabelling.new_to_old_vertices.size() == + relabelling.old_to_new_vertices.size()); + REQUIRE(relabelling.new_to_old_vertices.size() >= 2); + for (unsigned new_v = 0; new_v < relabelling.new_to_old_vertices.size(); + ++new_v) { + const auto old_v = relabelling.new_to_old_vertices[new_v]; + REQUIRE(relabelling.old_to_new_vertices.at(old_v) == new_v); + } + for (const auto& old_new_pair : relabelling.old_to_new_vertices) { + REQUIRE( + relabelling.new_to_old_vertices.at(old_new_pair.second) == + old_new_pair.first); + } +} + +static void check_that_all_entries_have_the_same_permutation( + unsigned permutation_hash, const EquivalentMappings& list) { + REQUIRE(!list.empty()); + REQUIRE(permutation_hash >= 2); + + // end_tokens[i] tells us the SOURCE vertex of whatever token is now at vertex + // i. + const auto end_tokens = + PermutationTestUtils::get_end_tokens_for_permutation(permutation_hash); + + for (const auto& entry : list) { + const auto& mapping = entry.first; + const auto& relabelling = entry.second; + REQUIRE(relabelling.permutation_hash == permutation_hash); + check_relabelling(relabelling); + check_that_old_mapping_is_a_subset_of_expected( + mapping, relabelling, end_tokens); + check_that_nonfixed_new_vertices_are_mentioned_in_old_mapping( + mapping, relabelling, end_tokens); + } +} + +// Create various random permutations on sets of size <= 6 of arbitrary labels, +// and see that the relabellings work. +SCENARIO("Relabelling test for random mappings") { + const unsigned number_of_vertices = 5; + vector original_labels; + + // The generated mappings, together with the relabelling results. + // The key is the permutation hash. + std::map entries; + RNG rng; + VertexMapping original_map; + CanonicalRelabelling relabeller; + + for (unsigned nn = 0; nn < 200; ++nn) { + original_map.clear(); + for (unsigned ii = 0; ii < number_of_vertices; ++ii) { + original_map[rng.get_size_t(10000)]; + } + original_labels.clear(); + for (const auto& entry : original_map) { + original_labels.push_back(entry.first); + } + rng.do_shuffle(original_labels); + { + size_t ii = 0; + for (auto& entry : original_map) { + entry.second = original_labels[ii]; + ++ii; + } + } + const auto result = relabeller(original_map); + REQUIRE(!result.too_many_vertices); + if (result.identity) { + // Don't store identities. 
+ REQUIRE(all_tokens_home(original_map)); + REQUIRE(result.permutation_hash == 0); + REQUIRE(result.old_to_new_vertices.empty()); + REQUIRE(result.new_to_old_vertices.empty()); + } else { + REQUIRE(result.permutation_hash > 0); + REQUIRE(result.old_to_new_vertices.size() == original_map.size()); + REQUIRE(result.new_to_old_vertices.size() == original_map.size()); + auto& list = entries[result.permutation_hash]; + list.push_back(std::make_pair(original_map, result)); + } + } + + for (const auto& entry : entries) { + check_that_all_entries_have_the_same_permutation(entry.first, entry.second); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp new file mode 100644 index 0000000000..047c2804e8 --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp @@ -0,0 +1,200 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "../TestUtils/DebugFunctions.hpp" +#include "TokenSwapping/ExactMappingLookup.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct ResultChecker { + size_t failed_due_to_too_many_vertices = 0; + size_t failed_due_to_table_missing_entry = 0; + size_t success = 0; + + void check_failed_result( + const ExactMappingLookup::Result& lookup_result, + const VertexMapping& desired_mapping) { + REQUIRE(!lookup_result.success); + if (lookup_result.too_many_vertices) { + CHECK(desired_mapping.size() >= 7); + ++failed_due_to_too_many_vertices; + return; + } + // There WERE enough edges. Why couldn't it find a solution? + // The graph must have been too big. + // The table should cover all 4-vertex mappings + // (at least up to depth 12, and probably all). + CHECK(desired_mapping.size() >= 5); + ++failed_due_to_table_missing_entry; + } + + void check_successful_result( + const ExactMappingLookup::Result& lookup_result, + const vector& sorted_edges_vect, VertexMapping desired_mapping) { + REQUIRE(lookup_result.success); + ++success; + // It succeeded. So, now we have to check it! + CHECK(!lookup_result.too_many_vertices); + + // desired_mapping is a source->target mapping. + // Interpret it to mean that mapping[i] = (current token on vertex i). + // So initially, (token at i) = (target vertex). + // Then, performing the swaps, all tokens should reach their home. + for (const auto& swap : lookup_result.swaps) { + REQUIRE(std::binary_search( + sorted_edges_vect.cbegin(), sorted_edges_vect.cend(), swap)); + std::swap(desired_mapping[swap.first], desired_mapping[swap.second]); + } + CHECK(all_tokens_home(desired_mapping)); + } +}; +} // namespace + +// We know that it succeeded and returned some swaps. 
+// Call it again with various max number of swaps limits. +static void recalculate_for_successful_problem_with_number_of_swaps_limits( + const VertexMapping& desired_mapping, const vector& edges_vect, + const vector& sorted_edges_vect, unsigned number_of_swaps, + ExactMappingLookup& lookup, ResultChecker& checker) { + for (unsigned max_number_of_swaps = 0; max_number_of_swaps < number_of_swaps; + ++max_number_of_swaps) { + const auto& lookup_result = + lookup(desired_mapping, edges_vect, max_number_of_swaps); + CHECK(!lookup_result.success); + } + for (unsigned max_number_of_swaps = number_of_swaps; + max_number_of_swaps < number_of_swaps + 5; ++max_number_of_swaps) { + const auto& lookup_result = + lookup(desired_mapping, edges_vect, max_number_of_swaps); + CHECK(lookup_result.success); + CHECK(lookup_result.swaps.size() == number_of_swaps); + checker.check_successful_result( + lookup_result, sorted_edges_vect, desired_mapping); + } +} + +// A simple monotonic transformation, avoids contiguous vertices. +static unsigned get_vertex_number(unsigned ii) { return 10 * ii * (ii + 2); } + +SCENARIO("Test exact mapping table lookup for wheel") { + // A star is vertex 0, joined to 1,2,3,...,m. + // A wheel also joins 1,2,...,m to make a cycle. + VertexMapping desired_mapping; + VertexMapping inverse_mapping; + ExactMappingLookup lookup; + + // Maintain an unsorted vector, just in case sorting them makes a difference + // (although it shouldn't). + vector all_edges; + vector all_edges_sorted; + vector vertices_used; + ResultChecker checker; + + for (unsigned number_of_spokes = 3; number_of_spokes <= 6; + ++number_of_spokes) { + vertices_used.clear(); + vertices_used.push_back(0); + all_edges.clear(); + for (unsigned ii = 1; ii <= number_of_spokes; ++ii) { + const auto vv = get_vertex_number(ii); + vertices_used.push_back(vv); + all_edges.push_back(get_swap(0, vv)); + } + // Complete the cycle on 1,2,...,m. + all_edges.push_back(get_swap(vertices_used.back(), vertices_used[1])); + for (unsigned ii = 1; ii < vertices_used.size(); ++ii) { + all_edges.push_back(get_swap(vertices_used[ii - 1], vertices_used[ii])); + } + + all_edges_sorted = all_edges; + std::sort(all_edges_sorted.begin(), all_edges_sorted.end()); + desired_mapping.clear(); + + // Set the SOURCE vertices. + for (auto vv : vertices_used) { + desired_mapping[vv]; + } + for (int perm_counter = 0;;) { + // Set the TARGET vertices. + { + unsigned ii = 0; + for (auto& entry : desired_mapping) { + entry.second = vertices_used[ii]; + ++ii; + } + } + bool succeeded = false; + unsigned number_of_swaps = 0; + + // We have a mapping. Try to look it up. Also, look up the inverse. + inverse_mapping = get_reversed_map(desired_mapping); + { + // Care...because the result is stored internally, + // another call to lookup will invalidate it! + const auto& lookup_result = lookup(desired_mapping, all_edges); + succeeded = lookup_result.success; + if (lookup_result.success) { + checker.check_successful_result( + lookup_result, all_edges_sorted, desired_mapping); + number_of_swaps = lookup_result.swaps.size(); + + const auto& inverse_lookup_result = + lookup(inverse_mapping, all_edges); + CHECK(inverse_lookup_result.success); + + checker.check_successful_result( + inverse_lookup_result, all_edges_sorted, inverse_mapping); + CHECK(number_of_swaps == inverse_lookup_result.swaps.size()); + } else { + // It failed. Why? 
+ checker.check_failed_result(lookup_result, desired_mapping); + const auto& inverse_lookup_result = + lookup(inverse_mapping, all_edges); + checker.check_failed_result(inverse_lookup_result, inverse_mapping); + } + } + + if (succeeded) { + recalculate_for_successful_problem_with_number_of_swaps_limits( + desired_mapping, all_edges, all_edges_sorted, number_of_swaps, + lookup, checker); + } + ++perm_counter; + if (perm_counter > 10) { + break; + } + if (!std::next_permutation(vertices_used.begin(), vertices_used.end())) { + break; + } + } + } + + CHECK(checker.failed_due_to_too_many_vertices == 22); + CHECK(checker.failed_due_to_table_missing_entry == 0); + CHECK(checker.success == 231); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp new file mode 100644 index 0000000000..856048668c --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp @@ -0,0 +1,154 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "TokenSwapping/FilteredSwapSequences.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Trivial table lookup tests") { + // Permutation hash 0 is the identity. + for (unsigned edges_bitset = 0; edges_bitset < 50; ++edges_bitset) { + const FilteredSwapSequences::SingleSequenceData identity_result( + 0, edges_bitset, 10); + CHECK(identity_result.edges_bitset == 0); + CHECK(identity_result.swaps_code == 0); + CHECK(identity_result.number_of_swaps == 0); + } + + // (0,1) is the first swap (index 0). So, just need to include that bit. + for (unsigned edges_bitset = 1; edges_bitset < 50; edges_bitset += 2) { + const FilteredSwapSequences::SingleSequenceData single_swap_result( + 2, edges_bitset, 10); + CHECK(single_swap_result.edges_bitset == 0x1); + CHECK(single_swap_result.swaps_code == 0x1); + CHECK(single_swap_result.number_of_swaps == 1); + } + + // Enact a non-identity permutation without edges; impossible! + const vector nontrivial_permutation_hashes{2, 3, 4, 5, 6, + 22, 33, 32, 42, 222}; + for (unsigned perm_hash : nontrivial_permutation_hashes) { + const FilteredSwapSequences::SingleSequenceData impossible_result( + perm_hash, 0x0, 10); + CHECK(impossible_result.edges_bitset == 0); + CHECK(impossible_result.swaps_code == 0); + CHECK( + impossible_result.number_of_swaps == + std::numeric_limits::max()); + } +} + +SCENARIO("Random entries test") { + // Note: the entries are definitely NOT real swap sequence codes, + // they are just random nunmbers. + + const unsigned num_bits = 15; + + std::map + original_entries; + // Make a vector, with duplicates. 
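// The codes used in this scenario pack one swap per 4 bits: a nibble value s
// in 1..15 names one of the possible swaps/edges on at most 6 vertices, and
// bit (s - 1) of the accompanying bitset records that the corresponding edge
// is used. A minimal standalone sketch (plain C++, arbitrary example values)
// of that packing, and of reading the nibbles back out low-nibble-first as
// the decoding loops in test_SwapSequenceTable.cpp below do:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const std::vector<unsigned> swap_indices{3, 1, 7};
  std::uint64_t code = 0;
  unsigned edges_bitset = 0;
  for (unsigned ss : swap_indices) {
    assert(ss >= 1 && ss <= 15);
    code <<= 4;
    code |= ss;
    edges_bitset |= (1u << (ss - 1));
  }
  assert(code == 0x317);
  assert(edges_bitset == ((1u << 2) | (1u << 0) | (1u << 6)));

  // Unpacking returns the nibbles in reverse order of insertion.
  std::vector<unsigned> unpacked;
  for (std::uint64_t copy = code; copy != 0; copy >>= 4) {
    unpacked.push_back(static_cast<unsigned>(copy & 0xF));
  }
  assert((unpacked == std::vector<unsigned>{7, 1, 3}));
  return 0;
}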
+ vector codes_vect; + + RNG rng; + + for (unsigned nn = 0; nn < 1000; ++nn) { + const auto num_swaps = rng.get_size_t(1, 6); + SwapConversion::SwapHash code = 0; + SwapConversion::EdgesBitset edges_bitset = 0; + + for (unsigned mm = 0; mm < num_swaps; ++mm) { + const auto new_swap = rng.get_size_t(1, num_bits); + code <<= 4; + code |= new_swap; + edges_bitset |= (1u << (new_swap - 1)); + } + auto& entry = original_entries[code]; + entry.edges_bitset = edges_bitset; + entry.swaps_code = code; + entry.number_of_swaps = num_swaps; + for (int kk = 0; kk < 3; ++kk) { + codes_vect.push_back(code); + } + } + rng.do_shuffle(codes_vect); + + FilteredSwapSequences filtered_sequences; + REQUIRE(filtered_sequences.get_total_number_of_entries() == 0); + filtered_sequences.initialise(codes_vect); + REQUIRE( + filtered_sequences.get_total_number_of_entries() == + original_entries.size()); + + // Now, look up every single edge bitset in turn and check that it finds the + // (joint) fewest number of swaps. + const SwapConversion::EdgesBitset max_bitset = (1u << num_bits) - 1; + for (SwapConversion::EdgesBitset bitset = 0; bitset <= max_bitset; ++bitset) { + // By brute force, find the (joint) fewest number of swaps in a sequence + // using only this bitset. + SwapConversion::SwapHash fewest_swaps_code = + std::numeric_limits::max(); + unsigned number_of_swaps = 10000; + for (const auto& entry : original_entries) { + if (entry.first > fewest_swaps_code) { + break; + } + REQUIRE(entry.second.number_of_swaps <= number_of_swaps); + // Is it a subset? + if ((entry.second.edges_bitset & bitset) != entry.second.edges_bitset) { + continue; + } + // We've found a better entry than what we've got. + number_of_swaps = entry.second.number_of_swaps; + fewest_swaps_code = entry.first; + } + + for (unsigned max_num_swaps = 1; max_num_swaps < num_bits + 3; + ++max_num_swaps) { + const auto result = + filtered_sequences.get_lookup_result(bitset, max_num_swaps); + if (result.number_of_swaps <= max_num_swaps) { + // It found an entry. It must be an existing entry. + const auto& existing_entry = original_entries.at(result.swaps_code); + REQUIRE(result.number_of_swaps == existing_entry.number_of_swaps); + REQUIRE(result.edges_bitset == existing_entry.edges_bitset); + REQUIRE(result.swaps_code == existing_entry.swaps_code); + + // ...and it must be valid... + REQUIRE((result.edges_bitset & bitset) == result.edges_bitset); + REQUIRE(result.number_of_swaps == number_of_swaps); + } else { + // No entry was found. It MUST be because none actually exist, subject + // to the constraints. + REQUIRE(number_of_swaps > max_num_swaps); + // Must be a null result. + REQUIRE(result.edges_bitset == 0); + REQUIRE(result.swaps_code == 0); + REQUIRE(result.number_of_swaps == std::numeric_limits::max()); + } + } + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp new file mode 100644 index 0000000000..de71d7086d --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp @@ -0,0 +1,213 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "../Data/FixedCompleteSolutions.hpp" +#include "../Data/FixedSwapSequences.hpp" +#include "SwapSequenceReductionTester.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +static void add_message( + const SequenceReductionStats& stats, const std::string& extra_message, + const SwapSequenceReductionTester::Options& options, + vector& calc_messages) { + std::stringstream ss; + ss << "[n=" << calc_messages.size() << ", " << extra_message + << ": init segm optim? " << std::boolalpha + << options.optimise_initial_segment_only << "]\n" + << stats.str(); + calc_messages.push_back(ss.str()); +} + +static void check_final_messages( + vector& expected_messages, + const vector& calc_messages) { + CHECK(expected_messages.size() == calc_messages.size()); + expected_messages.resize(calc_messages.size()); + for (unsigned ii = 0; ii < calc_messages.size(); ++ii) { + CHECK(expected_messages[ii] == calc_messages[ii]); + } +} + +// Reduce the fixed swap sequences, with edge set implicitly defined +// by the swaps themselves. +SCENARIO("Fixed swap sequences reduction") { +#ifdef TKET_TESTS_FULL + // The long tests take ~5 seconds on a 2021 Windows laptop. + vector expected_messages{ + "[n=0, Full tokens: init segm optim? true]\n" + "[478 equal probs (17115); 2 reduced probs (25 vs 29)]\n" + "[Overall reduction 17140 vs 17144: 0%]", + + "[n=1, Partial tokens: init segm optim? true]\n" + "[880 equal probs (25432); 16 reduced probs (385 vs 407)]\n" + "[Overall reduction 25817 vs 25839: 0%]", + + "[n=2, Full tokens: init segm optim? false]\n" + "[423 equal probs (14323); 57 reduced probs (2693 vs 2821)]\n" + "[Overall reduction 17016 vs 17144: 0%]", + + "[n=3, Partial tokens: init segm optim? false]\n" + "[658 equal probs (12376); 238 reduced probs (12962 vs 13463)]\n" + "[Overall reduction 25338 vs 25839: 1%]"}; + + const unsigned skip_number = 1; +#else + // The shorter tests take ~0.4 seconds. + vector expected_messages{ + "[n=0, Full tokens: init segm optim? true]\n" + "[25 equal probs (846); 0 reduced probs (0 vs 0)]\n" + "[Overall reduction 846 vs 846: 0%]", + + "[n=1, Partial tokens: init segm optim? true]\n" + "[46 equal probs (1348); 0 reduced probs (0 vs 0)]\n" + "[Overall reduction 1348 vs 1348: 0%]", + + "[n=2, Full tokens: init segm optim? false]\n" + "[24 equal probs (822); 1 reduced probs (22 vs 24)]\n" + "[Overall reduction 844 vs 846: 0%]", + + "[n=3, Partial tokens: init segm optim? 
false]\n" + "[34 equal probs (461); 12 reduced probs (844 vs 887)]\n" + "[Overall reduction 1305 vs 1348: 3%]"}; + const unsigned skip_number = 20; +#endif + + const FixedSwapSequences fixed_sequences; + SwapSequenceReductionTester tester; + SwapSequenceReductionTester::Options options; + vector calc_messages; + + const auto add_solutions = [&tester, &options, skip_number]( + const vector& seq_codes, + SequenceReductionStats& stats) { + for (unsigned ii = 0; ii < seq_codes.size(); ++ii) { + if (ii % skip_number != 0) { + continue; + } + const auto& code_str = seq_codes[ii]; + const DecodedProblemData problem_data(code_str); + const auto reduced_size = + tester.get_checked_solution_size(problem_data, options); + stats.add_solution(problem_data.swaps.size(), reduced_size); + } + }; + + for (int ii = 0; ii < 2; ++ii) { + options.optimise_initial_segment_only = (ii % 2 == 0); + { + SequenceReductionStats full_tokens_stats; + add_solutions(fixed_sequences.full, full_tokens_stats); + add_solutions(fixed_sequences.full_with_errors, full_tokens_stats); + add_message(full_tokens_stats, "Full tokens", options, calc_messages); + } + { + SequenceReductionStats partial_tokens_stats; + add_solutions(fixed_sequences.partial, partial_tokens_stats); + add_solutions(fixed_sequences.partial_with_errors, partial_tokens_stats); + add_message( + partial_tokens_stats, "Partial tokens", options, calc_messages); + } + } + check_final_messages(expected_messages, calc_messages); +} + +// The actual problem input data: the graph may have extra edges +// not present in the returned solution. +SCENARIO("Fixed complete problems") { +#ifdef TKET_TESTS_FULL + // The long tests take ~10 seconds on a 2021 Windows laptop. + vector expected_messages{ + "[n=0, Small: init segm optim? false]\n" + "[249 equal probs (1353); 29 reduced probs (163 vs 204)]\n" + "[Overall reduction 1516 vs 1557: 2%]", + + "[n=1, Medium: init segm optim? false]\n" + "[167 equal probs (2650); 60 reduced probs (1107 vs 1234)]\n" + "[Overall reduction 3757 vs 3884: 3%]", + + "[n=2, Large: init segm optim? false]\n" + "[164 equal probs (12771); 408 reduced probs (43946 vs 45894)]\n" + "[Overall reduction 56717 vs 58665: 3%]"}; + + const unsigned skip_number = 1; +#else + // The shorter tests take ~0.4 seconds. + vector expected_messages{ + "[n=0, Small: init segm optim? false]\n" + "[8 equal probs (48); 1 reduced probs (9 vs 10)]\n" + "[Overall reduction 57 vs 58: 1%]", + + "[n=1, Medium: init segm optim? false]\n" + "[8 equal probs (138); 1 reduced probs (23 vs 24)]\n" + "[Overall reduction 161 vs 162: 0%]", + + "[n=2, Large: init segm optim? false]\n" + "[10 equal probs (928); 16 reduced probs (1657 vs 1743)]\n" + "[Overall reduction 2585 vs 2671: 3%]"}; + const unsigned skip_number = 20; +#endif + + SwapSequenceReductionTester::Options options; + options.optimise_initial_segment_only = false; + + // Separate problems into small, medium, large. + vector stats(3); + + const FixedCompleteSolutions complete_solutions; + SwapSequenceReductionTester tester; + + for (const auto& problem_entry : complete_solutions.solutions) { + // First element encodes the edges. 
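    // (In FixedCompleteSolutions each entry's value is a vector of strings:
    // element [0] encodes the whole architecture as ':'-separated neighbour
    // lists, decoded by DecodedArchitectureData, while the remaining elements
    // are individual problems in the swap-sequence string format decoded by
    // DecodedProblemData. RequireContiguousVertices::NO is used below because
    // the architecture may contain edges that the solution swaps never touch.)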
+ const DecodedArchitectureData arch_data(problem_entry.second[0]); + for (unsigned ii = 1; ii < problem_entry.second.size(); ++ii) { + if (ii % skip_number != 0) { + continue; + } + const auto& problem_str = problem_entry.second[ii]; + const DecodedProblemData problem_data( + problem_str, DecodedProblemData::RequireContiguousVertices::NO); + + // Small + unsigned stats_index = 0; + if (problem_str.size() > 25) { + // Medium + stats_index = 1; + } + if (problem_str.size() > 60) { + // Large + stats_index = 2; + } + const auto reduced_size = + tester.get_checked_solution_size(problem_data, arch_data, options); + stats[stats_index].add_solution(problem_data.swaps.size(), reduced_size); + } + } + vector calc_messages; + add_message(stats[0], "Small", options, calc_messages); + add_message(stats[1], "Medium", options, calc_messages); + add_message(stats[2], "Large", options, calc_messages); + check_final_messages(expected_messages, calc_messages); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp new file mode 100644 index 0000000000..d73070888d --- /dev/null +++ b/tket/tests/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp @@ -0,0 +1,178 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "PermutationTestUtils.hpp" +#include "TokenSwapping/SwapConversion.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "TokenSwapping/SwapSequenceTable.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Extra redundant data in the table slows it down, +// but does not affect the returned results. +// But the stored swap sequences are used directly without further checks +// or optimisations, so they should be as close to optimal as possible. +static void test_irreducibility_of_codes( + unsigned permutation_hash, const vector& codes, + SwapListOptimiser& optimiser, SwapList& swap_list) { + for (auto& code : codes) { + swap_list.fast_clear(); + auto swap_sequence_hash_copy = code; + while (swap_sequence_hash_copy != 0) { + const Swap& swap = + SwapConversion::get_swap_from_hash(swap_sequence_hash_copy & 0xF); + swap_list.push_back(swap); + swap_sequence_hash_copy >>= 4; + } + const auto initial_number_of_swaps = swap_list.size(); + + // We don't yet have good theoretical results about order of passes, + // so just try all of them. + optimiser.optimise_pass_with_zero_travel(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + optimiser.optimise_pass_with_token_tracking(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + + // This may reorder the swaps, without reducing. 
+ optimiser.optimise_pass_with_frontward_travel(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + + // We'd LIKE to have a theorem assuring us that this pass isn't necessary + // after the previous passes, but currently we don't. + optimiser.optimise_pass_with_token_tracking(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + optimiser.optimise_pass_with_zero_travel(swap_list); + REQUIRE(initial_number_of_swaps == swap_list.size()); + } +} + +// All the swap sequences encoded in the vector should enact +// the given permutation. +static void test_correctness_of_codes( + unsigned permutation_hash, const vector& codes) { + REQUIRE(codes.size() >= 2); + + // Reconstruct the desired permutation from the hash. + const auto expected_tokens = + PermutationTestUtils::get_end_tokens_for_permutation(permutation_hash); + + // Element i is the token at vertex i. + // We start with tokens 0,1,2,...,5 on vertices 0,1,2,...,5, + // then perform the swaps. + std::array tokens; + for (const auto& code : codes) { + std::iota(tokens.begin(), tokens.end(), 0); + auto swap_sequence_hash_copy = code; + unsigned number_of_swaps = 0; + while (swap_sequence_hash_copy != 0) { + const Swap& swap = + SwapConversion::get_swap_from_hash(swap_sequence_hash_copy & 0xF); + swap_sequence_hash_copy >>= 4; + std::swap(tokens[swap.first], tokens[swap.second]); + ++number_of_swaps; + } + REQUIRE(number_of_swaps >= 1); + + // Actually, 16 is the maximum. + CHECK(number_of_swaps <= 12); + REQUIRE(tokens == expected_tokens); + } +} + +// The swap sequences encoded in the vector should not have +// any redundancies: if sequences S1, S2 have edge bitsets E1, E2 +// (i.e., E(j) is the set of swaps used in S(j)), AND give the same permutation, +// then E1 != E2. (No point in having both). +// Also, if E1 is a subset of E2, then length(S2) < length(S1). +// (Otherwise, S2 would be a pointless entry: whenever S2 is possible, +// S1 is also possible, with an equal or smaller number of swaps). +static void test_redundancies( + unsigned permutation_hash, const vector& codes) { + vector edge_bitsets; + edge_bitsets.reserve(codes.size()); + for (const auto& code : codes) { + edge_bitsets.push_back(SwapConversion::get_edges_bitset(code)); + } + // Crude quadratic algorithm to check which codes are redundant. + // Don't rely on sorted codes. + for (unsigned ii = 0; ii < codes.size(); ++ii) { + for (unsigned jj = 0; jj < codes.size(); ++jj) { + if (ii == jj) { + continue; + } + const auto intersection = edge_bitsets[ii] & edge_bitsets[jj]; + const bool e1_subset_of_e2 = (intersection == edge_bitsets[ii]); + const auto num_swaps1 = SwapConversion::get_number_of_swaps(codes[ii]); + const auto num_swaps2 = SwapConversion::get_number_of_swaps(codes[jj]); + + if (e1_subset_of_e2 && num_swaps1 <= num_swaps2) { + INFO( + "For perm.hash " + << permutation_hash << ", Code 1: 0x" << std::hex << codes[ii] + << " only uses swaps from code 2: 0x" << codes[jj] + << ", and uses the same or fewer swaps (" << std::dec << num_swaps1 + << " vs " << num_swaps2 + << "). Thus code 2 is pointless and could be removed."); + CHECK(false); + } + } + } +} + +// Checks that all entries returned by the table do actually +// give the required permutation of vertices. 
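// A minimal standalone sketch (plain C++, arbitrary example values) of the
// subset test used in test_redundancies above: an edge set E1, stored as a
// bitset, is contained in E2 exactly when (E1 & E2) == E1. If one entry's
// edge set is contained in another's, the entry with the larger edge set must
// use strictly fewer swaps; otherwise it is pointless, since whenever it is
// usable, the smaller-edge-set entry is too, with no more swaps.

#include <cassert>

int main() {
  const unsigned e1 = 0b00101u;  // uses edges 0 and 2
  const unsigned e2 = 0b10111u;  // uses edges 0, 1, 2 and 4
  assert((e1 & e2) == e1);       // E1 is a subset of E2
  assert((e2 & e1) != e2);       // E2 is not a subset of E1
  return 0;
}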
+SCENARIO("Fixed table entries test") { + const auto table = SwapSequenceTable::get_table(); + // const auto table = get_new_table(); + SwapListOptimiser optimiser; + SwapList swap_list; + unsigned total_entries = 0; + for (const auto& entry : table) { + REQUIRE(entry.first >= 2); + test_correctness_of_codes(entry.first, entry.second); + test_irreducibility_of_codes( + entry.first, entry.second, optimiser, swap_list); + test_redundancies(entry.first, entry.second); + + // No duplication. Not necessary, but a good test. + CHECK(std::is_sorted(entry.second.cbegin(), entry.second.cend())); + CHECK( + std::adjacent_find(entry.second.cbegin(), entry.second.cend()) == + entry.second.cend()); + + // NOTE: we should really also test that inverse mappings are not stored in + // the table. This was previously true, but a negligibly small number of + // entries have crept in. They're a bit fiddly to track down and remove, so + // forget about them for now. (Confusion: within each permutation hash, e.g. + // 32 corresponding to (012)(34)(5), the INVERSE mapping is (021)(34)(5). + // This will have the same permutation hash, but of course vertices must be + // RELABELLED. To find the inverse entry in the table, we cannot JUST + // reverse the swaps, we also need to relabel them. + /// TODO: test for, track down and remove redundant inverse entries. + total_entries += entry.second.size(); + } + CHECK(total_entries == 7939); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp new file mode 100644 index 0000000000..9b94b9a7a8 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp @@ -0,0 +1,62 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ArchitectureEdgesReimplementation.hpp" + +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +// This is just copied from Architecture.cpp, +// but we WANT it to remain fixed for testing purposes; +// do NOT keep in sync! +std::vector> get_square_grid_edges( + unsigned dim_r, const unsigned dim_c, const unsigned layers) { + // A trivial injective hash function on the cuboid. 
+ const auto vertex = [dim_r, dim_c, layers]( + unsigned ver, unsigned hor, unsigned l) -> unsigned { + REQUIRE(ver < dim_r); + REQUIRE(hor < dim_c); + REQUIRE(l < layers); + return ver + dim_r * (hor + dim_c * l); + }; + + std::vector> edges; + for (unsigned l = 0; l < layers; l++) { + for (unsigned ver = 0; ver < dim_r; ver++) { + for (unsigned hor = 0; hor < dim_c; hor++) { + const auto n = vertex(ver, hor, l); + if (hor != dim_c - 1) { + const auto h_neighbour = vertex(ver, hor + 1, l); + edges.push_back({n, h_neighbour}); + } + if (ver != dim_r - 1) { + const auto v_neighbour = vertex(ver + 1, hor, l); + edges.push_back({n, v_neighbour}); + } + if (l != layers - 1) { + const auto l_neighbour = vertex(ver, hor, l + 1); + edges.push_back({n, l_neighbour}); + } + } + } + } + return edges; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp new file mode 100644 index 0000000000..b730ad7bd2 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.hpp @@ -0,0 +1,36 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +// We would like to use the SquareGrid Architecture class, +// but the order of edges is not guaranteed (an implementation detail). +// Therefore, we copy the code to have a single, fixed ordering +// for testing purposes with token swapping. +// NOTE: the only important thing is the order of edges, +// NOT the specific vertex labels. The vertices will be relabelled +// in order of appearance by ArchitectureMapping. +std::vector> get_square_grid_edges( + unsigned dim_r, const unsigned dim_c, const unsigned layers); + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp new file mode 100644 index 0000000000..3d79e68475 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.cpp @@ -0,0 +1,165 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
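// The BestTsaTester defined in this file relabels "raw" vertex labels to
// contiguous internal indices, assigned in order of first appearance in the
// edge list, before handing the problem to the Architecture-based TSA. A
// minimal standalone sketch (plain C++, arbitrary example labels) of that
// assignment:

#include <cassert>
#include <map>
#include <utility>
#include <vector>

static std::map<unsigned, unsigned> relabel_by_first_appearance(
    const std::vector<std::pair<unsigned, unsigned>>& raw_edges) {
  std::map<unsigned, unsigned> raw_to_internal;
  for (const auto& edge : raw_edges) {
    for (unsigned raw_vertex : {edge.first, edge.second}) {
      if (raw_to_internal.count(raw_vertex) == 0) {
        const auto next_index = static_cast<unsigned>(raw_to_internal.size());
        raw_to_internal[raw_vertex] = next_index;
      }
    }
  }
  return raw_to_internal;
}

int main() {
  const auto map = relabel_by_first_appearance({{10, 7}, {7, 99}, {99, 10}});
  assert(map.at(10) == 0);
  assert(map.at(7) == 1);
  assert(map.at(99) == 2);
  return 0;
}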
+ +#include "BestTsaTester.hpp" + +#include + +#include "Architecture/BestTsaWithArch.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { + +// We are going to treat the raw data in FixedSwapSequences etc. as +// the "correct" data, which we don't want to relabel or process further. +// +// But when an Architecture object is created with a vector of edges, +// given by pairs ("raw" vertices), +// vertex relabelling takes place. +// Thus we need an extra layer of conversion to get back what we want. +struct VertexRelabellingManager { + std::map raw_to_internal_map; + // The internal indices are, of course, 0,1,2,...,N for some N, + // and therefore we can use a vector instead of a map. + vector internal_to_raw_map; + + // The exact same edges that were used to construct the Architecture object + // (in the same order!) must be passed in. + explicit VertexRelabellingManager( + const vector>& raw_edges) { + for (auto edge : raw_edges) { + size_t next_index = raw_to_internal_map.size(); + if (raw_to_internal_map.count(edge.first) == 0) { + raw_to_internal_map[edge.first] = next_index; + } + next_index = raw_to_internal_map.size(); + if (raw_to_internal_map.count(edge.second) == 0) { + raw_to_internal_map[edge.second] = next_index; + } + } + internal_to_raw_map.resize(raw_to_internal_map.size()); + for (const auto& entry : raw_to_internal_map) { + internal_to_raw_map[entry.second] = entry.first; + } + } + Swap get_raw_swap(Swap internal_swap) const { + return get_swap( + internal_to_raw_map.at(internal_swap.first), + internal_to_raw_map.at(internal_swap.second)); + } + + // To be used as input to the TSA. + // Gives the source->target mappings for INTERNAL vertices. + VertexMapping get_internal_mapping_for_tsa_input( + const VertexMapping& raw_mapping) const { + VertexMapping mapping; + for (const auto& entry : raw_mapping) { + mapping[raw_to_internal_map.at(entry.first)] = + raw_to_internal_map.at(entry.second); + } + return mapping; + } +}; +} // namespace + +size_t BestTsaTester::get_checked_solution_size( + const DecodedProblemData& problem_data) { + m_architecture_work_data.edges.clear(); + for (const auto& swap : problem_data.swaps) { + m_architecture_work_data.edges.insert(swap); + } + m_architecture_work_data.number_of_vertices = 0; + return get_checked_solution_size(problem_data, m_architecture_work_data); +} + +size_t BestTsaTester::get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data) { + CHECK(problem_data.number_of_vertices >= 4); + if (architecture_data.number_of_vertices > 0) { + CHECK( + architecture_data.number_of_vertices >= + problem_data.number_of_vertices); + } + // problem_data.number_of_vertices only includes the vertices mentioned in the + // solution swaps. + // architecture_data.number_of_vertices is EITHER set to zero, + // OR is calculated from the EDGES in the architecture, and hence is correct. 
+ const auto number_of_vertices = std::max( + architecture_data.number_of_vertices, problem_data.number_of_vertices); + + check_mapping(problem_data.vertex_mapping); + for (const auto& swap : problem_data.swaps) { + REQUIRE(architecture_data.edges.count(swap) != 0); + } + for (const auto& edge : architecture_data.edges) { + REQUIRE(edge.first < number_of_vertices); + REQUIRE(edge.second < number_of_vertices); + } + m_edges_vect = vector>{ + architecture_data.edges.cbegin(), architecture_data.edges.cend()}; + + REQUIRE(problem_data.vertex_mapping.size() >= 1); + REQUIRE(problem_data.vertex_mapping.size() <= number_of_vertices); + REQUIRE(problem_data.vertex_mapping.crbegin()->first < number_of_vertices); + + const bool full_tokens = + problem_data.vertex_mapping.size() == number_of_vertices; + + const Architecture arch(m_edges_vect); + const ArchitectureMapping arch_mapping(arch, m_edges_vect); + const VertexRelabellingManager relabelling_manager(m_edges_vect); + m_raw_swap_list.clear(); + m_vertex_mapping_copy = + relabelling_manager.get_internal_mapping_for_tsa_input( + problem_data.vertex_mapping); + + BestTsaWithArch::append_solution( + m_raw_swap_list, m_vertex_mapping_copy, arch_mapping); + + // Now check the calculated solution. + // Set it back to the raw, i.e. "proper" mapping. + m_vertex_mapping_copy = problem_data.vertex_mapping; + + for (auto id_opt = m_raw_swap_list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = m_raw_swap_list.next(id); + auto& swap = m_raw_swap_list.at(id); + // This is an "internal" swap, so needs conversion back to "raw". + swap = relabelling_manager.get_raw_swap(swap); + + const VertexSwapResult vswap_result(swap, m_vertex_mapping_copy); + if (full_tokens) { + REQUIRE(vswap_result.tokens_moved == 2); + } else { + // We require our best TSA to avoid empty swaps. + REQUIRE(vswap_result.tokens_moved >= 1); + REQUIRE(vswap_result.tokens_moved <= 2); + } + REQUIRE(architecture_data.edges.count(swap) != 0); + } + REQUIRE(all_tokens_home(m_vertex_mapping_copy)); + return m_raw_swap_list.size(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp new file mode 100644 index 0000000000..4d510889ea --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/BestTsaTester.hpp @@ -0,0 +1,60 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "DecodedProblemData.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** Solves a fixed problem using the current best TSA. */ +class BestTsaTester { + public: + /** Computes a solution to the problem using our best TSA, + * checks it, and returns how many swaps it needed. + * The edges of the graph are directly taken from the list of swaps in the + * reference solution. 
+ * @param data The problem data which was decoded from a string. + * @return The number of swaps returned by our TSA. The calculated swaps are + * also checked for correctness. + */ + size_t get_checked_solution_size(const DecodedProblemData& data); + + /** For problems where the architecture is NOT simply given implicitly + * by the swap sequence, so we must also pass in the complete set + * of edges, some of which might not appear in the final swaps. + * @param problem_data The data about a specific problem (calculated swaps, + * etc.) + * @param architecture_data Data about the architecture for the problem which + * is NOT deduced implicitly from the problem data itself (i.e., the edges). + * @return The number of swaps returned by our TSA. The calculated swaps are + * also checked for correctness. + */ + size_t get_checked_solution_size( + const DecodedProblemData& problem_data, + const DecodedArchitectureData& architecture_data); + + private: + SwapList m_raw_swap_list; + DecodedArchitectureData m_architecture_work_data; + std::vector> m_edges_vect; + VertexMapping m_vertex_mapping_copy; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp new file mode 100644 index 0000000000..bc7aa5d063 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.cpp @@ -0,0 +1,52 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "DebugFunctions.hpp" + +#include + +namespace tket { +namespace tsa_internal { + +std::string str(const VertexMapping& vertex_mapping) { + std::stringstream ss; + ss << "VM:"; + for (const auto& entry : vertex_mapping) { + ss << " " << entry.first << "->" << entry.second << " "; + } + return ss.str(); +} + +std::string str(const SwapList& swaps) { return str(swaps.to_vector()); } + +std::string str(const std::vector& swaps) { + std::stringstream ss; + for (auto swap : swaps) { + ss << " (" << swap.first << "," << swap.second << ") "; + } + return ss.str(); +} + +size_t get_swaps_lower_bound( + const VertexMapping& vertex_mapping, + DistancesInterface& distances_calculator) { + // Each swap decreases the sum by at most 2 (and more likely 1 in many cases, + // if the mapping is sparse), so we need >= sum/2. But it's an integer of + // course. + return (get_total_home_distances(vertex_mapping, distances_calculator) + 1) / + 2; +} + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/DebugFunctions.hpp b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.hpp new file mode 100644 index 0000000000..3d4dd0f9e8 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/DebugFunctions.hpp @@ -0,0 +1,65 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "TokenSwapping/DistanceFunctions.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { + +/** Get a string representation. + * @param vertex_mapping A mapping, usually representing a desired + * source->target mapping for a Token Swapping problem. + * @return A string representation. + */ +std::string str(const VertexMapping& vertex_mapping); + +/** Get a string representation. + * @param swaps An ordered list of swaps, usually the solution to a Token + * Swapping problem. + * @return A string representation. + */ +std::string str(const SwapList& swaps); + +/** Get a string representation. + * @param swaps An ordered list of swaps, usually the solution to a Token + * Swapping problem. + * @return A string representation. + */ +std::string str(const std::vector& swaps); + +/** A simple theoretical lower bound on the number of swaps necessary + * to achieve a given vertex mapping. (Of course it is not always possible + * to achieve this bound. But the algorithm in the 2016 paper + * "Approximation and Hardness of Token Swapping", for example, guarantees + * to find a solution within a factor of 4, or a factor of 2 for trees, + * in the case where every vertex has a token). + * TODO: What happens if some vertices are empty? Not considered in the 2016 + * paper! Need to think about it. This is still a lower bound, but how close? + * @param vertex_mapping current source->target mapping. + * @param distances An object to calculate distances between vertices. + * @return A number S such that every possible solution has >= S swaps. + * However, note that the true minimum value might be larger, but finding + * the value seems about as hard as finding an actual solution, and thus + * is possibly exponentially hard (seems to be unknown, even for trees). + */ +size_t get_swaps_lower_bound( + const VertexMapping& vertex_mapping, DistancesInterface& distances); + +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp new file mode 100644 index 0000000000..569243fd02 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.cpp @@ -0,0 +1,180 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
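// Relating to get_swaps_lower_bound in DebugFunctions above, here is a
// standalone sketch (plain C++, a hand-picked example) of the bound on a
// concrete case. On the path graph 0-1-2, exchanging the tokens on vertices
// 0 and 2 has total home distance 2 + 2 = 4, so the bound is (4 + 1) / 2 = 2,
// while the true optimum is 3 swaps, e.g. (0,1),(1,2),(0,1): the bound is
// valid but not always attained.

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <map>

int main() {
  // Desired source->target mapping on the path 0-1-2; vertex 1 keeps its
  // token.
  const std::map<unsigned, unsigned> vertex_mapping{{0, 2}, {1, 1}, {2, 0}};
  std::size_t total_home_distance = 0;
  for (const auto& entry : vertex_mapping) {
    // On a path graph, the distance is just the difference of the labels.
    total_home_distance += static_cast<std::size_t>(std::abs(
        static_cast<int>(entry.first) - static_cast<int>(entry.second)));
  }
  assert(total_home_distance == 4);
  const std::size_t lower_bound = (total_home_distance + 1) / 2;
  assert(lower_bound == 2);
  return 0;
}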
+ +#include "DecodedProblemData.hpp" + +#include + +#include "TokenSwapping/GeneralFunctions.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +/// TODO: move somewhere more appropriate. +// initially, "vm" has keys equal to the vertices with tokens; the values are +// ignored. Change to the desired source->target mapping, as used in all problem +// solving, induced by the swaps. Return the number of empty swaps. +static unsigned get_problem_mapping( + VertexMapping& vm, const vector& swaps) { + const auto init_num_tokens = vm.size(); + for (auto& entry : vm) { + entry.second = entry.first; + } + unsigned empty_swaps = 0; + for (auto swap : swaps) { + const VertexSwapResult result(swap, vm); + if (result.tokens_moved == 0) { + ++empty_swaps; + } + } + // Each time we had v1->t1, v2->t2 and we swapped v1,v2, we then got v1->t2, + // v2->t1. Thus, the KEY is a vertex, the VALUE is the token currently on that + // vertex. So, the VALUES are the tokens, which are the vertex it originally + // came from, i.e., it's end vertex -> original vertex. So our desired problem + // mapping source -> target is the REVERSE!! + vm = get_reversed_map(vm); + REQUIRE(init_num_tokens == vm.size()); + check_mapping(vm); + return empty_swaps; +} + +static const std::string& encoding_chars() { + static const std::string chars{ + "0123456789abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ"}; + return chars; +} + +static std::map get_char_to_vertex_map_local() { + std::map char_to_vertex_map; + const auto& chars = encoding_chars(); + for (size_t ii = 0; ii < chars.size(); ++ii) { + char_to_vertex_map[chars[ii]] = ii; + } + return char_to_vertex_map; +} + +static const std::map& char_to_vertex_map() { + static const std::map map( + get_char_to_vertex_map_local()); + return map; +} + +DecodedProblemData::DecodedProblemData( + const std::string& str, + RequireContiguousVertices require_contiguous_vertices) { + if (str.empty()) { + return; + } + + unsigned index = 0; + bool separator_found = false; + while (index < str.size()) { + if (str.at(index) == '_') { + ++index; + separator_found = true; + break; + } + const auto v1 = char_to_vertex_map().at(str.at(index)); + const auto v2 = char_to_vertex_map().at(str.at(index + 1)); + swaps.emplace_back(get_swap(v1, v2)); + index += 2; + } + + std::set vertices; + for (auto swap : swaps) { + vertices.insert(swap.first); + vertices.insert(swap.second); + } + CHECK(vertices.size() >= 4); + number_of_vertices = vertices.size(); + if (require_contiguous_vertices == RequireContiguousVertices::YES) { + REQUIRE(*vertices.crbegin() + 1 == vertices.size()); + } + + // Now set up the vertex mapping. Initially, all vertices with tokens + // have a token value equal to the vertex number. + vertex_mapping.clear(); + if (separator_found) { + unsigned num_tokens = 0; + for (; index < str.size(); ++index) { + const auto vv = char_to_vertex_map().at(str.at(index)); + if (require_contiguous_vertices == RequireContiguousVertices::YES) { + // It came from a swap sequence. Therefore, there are no extra edges, + // so every vertex must exist on a USED edge. + REQUIRE(vertices.count(vv) != 0); + } + vertex_mapping[vv]; + ++num_tokens; + } + REQUIRE(num_tokens == vertex_mapping.size()); + } else { + REQUIRE(index == str.size()); + for (auto vv : vertices) { + vertex_mapping[vv]; + } + } + // NOW, perform the swaps. 
+ get_problem_mapping(vertex_mapping, swaps); +} + +DecodedArchitectureData::DecodedArchitectureData() : number_of_vertices(0) {} + +DecodedArchitectureData::DecodedArchitectureData( + const std::string& solution_edges_string) { + vector> neighbours(1); + std::set vertices_seen; + for (unsigned char ch : solution_edges_string) { + if (ch != ':') { + const auto new_v = char_to_vertex_map().at(ch); + neighbours.back().push_back(new_v); + vertices_seen.insert(new_v); + continue; + } + // We move onto the next vertex. + neighbours.emplace_back(); + } + // The last vertex N cannot have any neighbours j with j>N, + // so we don't bother to record it in the string, + // so it's not stored in "neighbours". + number_of_vertices = neighbours.size() + 1; + CHECK(number_of_vertices >= 4); + // But everything MUST be joined to something, if the graph is connected. + // Vertex v won't be listed if it only joins higher-numbered vertices, + // so many vertices might not be mentioned here. + REQUIRE(!vertices_seen.empty()); + REQUIRE(*vertices_seen.crbegin() <= neighbours.size()); + + for (size_t ii = 0; ii < neighbours.size(); ++ii) { + if (neighbours[ii].empty()) { + continue; + } + REQUIRE(std::is_sorted(neighbours[ii].cbegin(), neighbours[ii].cend())); + REQUIRE(neighbours[ii][0] > ii); + REQUIRE( + std::adjacent_find(neighbours[ii].cbegin(), neighbours[ii].cend()) == + neighbours[ii].cend()); + for (auto jj : neighbours[ii]) { + edges.insert(get_swap(ii, jj)); + } + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp new file mode 100644 index 0000000000..fa7d139570 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/DecodedProblemData.hpp @@ -0,0 +1,76 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "TokenSwapping/VertexMappingFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +/** For converting a raw string, representing a fixed sequence of swaps, + * into a problem for a TSA. */ +struct DecodedProblemData { + /** This is, of course, a possible SOLUTION to the problem, not part + * of the problem input data itself. Since we know at least one solution + * (this one), we can compare it with our returned solution + * to see how good it is. + */ + std::vector swaps; + + /** The desired source->target mapping for a problem. */ + VertexMapping vertex_mapping; + + size_t number_of_vertices; + + /** Do we require the vertex numbers to be {0,1,2,...,m}, with no gaps? 
*/ + enum class RequireContiguousVertices { YES, NO }; + + explicit DecodedProblemData( + const std::string& str, + RequireContiguousVertices require_contiguous_vertices = + RequireContiguousVertices::YES); +}; + +/** For decoding strings like "1e:2d:3c:4b:5a:69:8:8:9:a:b:c:d:e" + * as seen in FixedCompleteSolutions, which encode + * the neighbours of vertices 0,1,2,...,N. + * Only edges(i,j) with i edges; + + /** The vertex numbers are contiguous, i.e. 0,1,2,...N for some N. */ + size_t number_of_vertices; + + /** Simply without filling any data. */ + DecodedArchitectureData(); + + /** Decodes and fills the data upon construction. + * @param solution_edges_string A string which encodes the edges of an + * architecture. + */ + explicit DecodedArchitectureData(const std::string& solution_edges_string); +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp new file mode 100644 index 0000000000..617a2ebc96 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.cpp @@ -0,0 +1,233 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "FullTsaTesting.hpp" + +#include + +#include "Architecture/ArchitectureMapping.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" +#include "DebugFunctions.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +void FullTsaTesting::check_solution( + size_t counts_list_index, VertexMapping vertex_mapping, size_t lower_bound, + AllowEmptySwaps allow_empty_swaps) { + bool empty_swap_occurred = false; + REQUIRE(m_swap_list.size() >= lower_bound); + for (auto swap : m_swap_list.to_vector()) { + const VertexSwapResult swap_res(swap, vertex_mapping); + if (swap_res.tokens_moved == 0) { + empty_swap_occurred = true; + } + } + if (empty_swap_occurred && allow_empty_swaps == AllowEmptySwaps::NO) { + INFO( + "index=" << counts_list_index << ", " << vertex_mapping.size() + << " toks; lb=" << lower_bound << "; " << m_swap_list.size() + << " swaps"); + CHECK(false); + } + REQUIRE(all_tokens_home(vertex_mapping)); + auto& swaps = m_counts_list[counts_list_index].sorted_swaps; + swaps = m_swap_list.to_vector(); + std::sort(swaps.begin(), swaps.end()); +} + +void FullTsaTesting::check_equivalent_good_solution( + size_t existing_index, VertexMapping vertex_mapping, + AllowEmptySwaps allow_empty_swaps) { + check_solution( + m_counts_list.size() - 1, vertex_mapping, 0, allow_empty_swaps); + INFO("existing_index=" << existing_index); + CHECK( + m_counts_list[existing_index].sorted_swaps == + m_counts_list.back().sorted_swaps); +} + +void FullTsaTesting::test_order(size_t index1, size_t index2) const { + INFO("i1=" 
<< index1 << ", i2=" << index2); + CHECK( + m_counts_list[index1].sorted_swaps.size() <= + m_counts_list[index2].sorted_swaps.size()); +} + +void FullTsaTesting::complete_counts_list_for_single_problem() { + size_t smallest_number = m_counts_list[0].sorted_swaps.size(); + + // Ignore the last index, which is a dummy. + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + auto& counts = m_counts_list[index]; + counts.total_swaps += counts.sorted_swaps.size(); + smallest_number = std::min(smallest_number, counts.sorted_swaps.size()); + } + // Now, we've got the (joint) winner. + size_t best_index = m_counts_list.size(); + size_t num_winners = 0; + + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + auto& counts = m_counts_list[index]; + REQUIRE(counts.sorted_swaps.size() >= smallest_number); + if (counts.sorted_swaps.size() == smallest_number) { + ++counts.problems_where_this_was_the_joint_winner; + ++num_winners; + best_index = index; + } + } + REQUIRE(num_winners >= 1); + if (num_winners == 1) { + ++m_counts_list[best_index].problems_where_this_was_the_clear_winner; + } +} + +FullTsaTesting::FullTsaTesting() { + m_counts_list.resize(7); + for (auto& entry : m_counts_list) { + entry.total_swaps = 0; + } +} + +void FullTsaTesting::add_problems( + const ArchitectureMapping& arch_mapping, + const vector& problems, const std::string& new_name, + RNG& rng, PartialTsaInterface& full_tsa) { + m_number_of_problems += problems.size(); + const std::string name_for_this = new_name + ":" + full_tsa.name(); + if (m_name.empty()) { + m_name = name_for_this; + } else { + if (m_name != name_for_this) { + m_name = m_name + ":" + name_for_this; + } + } + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RiverFlowPathFinder path_finder(distances, neighbours, rng); + vector raw_calc_swaps; + VertexMapping problem_copy_to_destroy; + + for (size_t prob_index = 0; prob_index < problems.size(); ++prob_index) { + const auto& problem = problems[prob_index]; + const auto lower_bound = get_swaps_lower_bound(problem, distances); + m_number_of_tokens += problem.size(); + m_total_lower_bounds += lower_bound; + problem_copy_to_destroy = problem; + m_swap_list.clear(); + rng.set_seed(); + full_tsa.append_partial_solution( + m_swap_list, problem_copy_to_destroy, distances, neighbours, + path_finder); + raw_calc_swaps = m_swap_list.to_vector(); + + // Now, let's check the calculated swaps. + check_solution(0, problem, lower_bound, AllowEmptySwaps::NO); + + // Minimal travel optimising + m_optimiser.optimise_pass_with_zero_travel(m_swap_list); + check_solution(1, problem, lower_bound, AllowEmptySwaps::NO); + test_order(1, 0); + + //...add artificial token tracking...(remembering that empty swaps + // can be introduced, since it knows nothing about our tokens). + m_optimiser.optimise_pass_with_token_tracking(m_swap_list); + check_solution(2, problem, lower_bound, AllowEmptySwaps::YES); + test_order(2, 1); + + m_optimiser.optimise_pass_remove_empty_swaps(m_swap_list, problem); + check_solution(3, problem, lower_bound, AllowEmptySwaps::NO); + test_order(3, 2); + + m_optimiser.full_optimise(m_swap_list, problem); + check_solution(4, problem, lower_bound, AllowEmptySwaps::NO); + test_order(4, 3); + + // Now, test various equalities. + + // The token tracking pass, by itself, is the same whether or not + // we zero travel optimise first (which just makes things faster, + // not better). 
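+    // Reload the raw calculated swaps, so the passes below start from scratch.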
+ m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.optimise_pass_with_token_tracking(m_swap_list); + m_optimiser.optimise_pass_with_frontward_travel(m_swap_list); + // Is 5 the same as 2? No! Usually the same, but NOT always; + // e.g. a test with random trees found a small difference. + check_solution(5, problem, lower_bound, AllowEmptySwaps::YES); + + // Swap travels permute the swaps, but otherwise reduce them + // no more than zero travel. + m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.optimise_pass_with_frontward_travel(m_swap_list); + check_equivalent_good_solution(1, problem, AllowEmptySwaps::NO); + + // full optimise is no better when combined + // with other passes. + m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.full_optimise(m_swap_list); + check_equivalent_good_solution(2, problem, AllowEmptySwaps::YES); + m_optimiser.optimise_pass_with_token_tracking(m_swap_list); + check_equivalent_good_solution(2, problem, AllowEmptySwaps::YES); + + m_swap_list.clear(); + for (auto swap : raw_calc_swaps) { + m_swap_list.push_back(swap); + } + m_optimiser.full_optimise(m_swap_list, problem); + check_equivalent_good_solution(4, problem, AllowEmptySwaps::NO); + + complete_counts_list_for_single_problem(); + } +} + +std::string FullTsaTesting::str() const { + std::stringstream ss; + ss << "[" << m_name << ": " << m_number_of_problems << " probs; " + << m_number_of_tokens << " toks; " << m_total_lower_bounds + << " tot.lb]\n[Total swaps:"; + // The last entry is a "dummy". + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + ss << " " << m_counts_list[index].total_swaps; + } + ss << "]\n[Winners: joint:"; + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + ss << " " << m_counts_list[index].problems_where_this_was_the_joint_winner; + } + ss << " undisputed:"; + for (size_t index = 0; index + 1 < m_counts_list.size(); ++index) { + ss << " " << m_counts_list[index].problems_where_this_was_the_clear_winner; + } + ss << "]"; + return ss.str(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp new file mode 100644 index 0000000000..3592b1c487 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/FullTsaTesting.hpp @@ -0,0 +1,87 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Architecture/ArchitectureMapping.hpp" +#include "TokenSwapping/PartialTsaInterface.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "Utils/RNG.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Only for testing FULL TSAs, which guarantee to find a solution. 
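+// It runs the given full TSA on each problem, then applies various
+// SwapListOptimiser passes to the calculated solution, recording for
+// each pass the total number of swaps and how often it produced the
+// (joint) fewest swaps.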
+class FullTsaTesting {
+ public:
+  FullTsaTesting();
+
+  /// Will use the RiverFlowPathFinder
+  /// (which needs an RNG).
+  void add_problems(
+      const ArchitectureMapping& arch_mapping,
+      const std::vector<VertexMapping>& problems, const std::string& name,
+      RNG& rng, PartialTsaInterface& full_tsa);
+
+  /// A summary of the statistics.
+  std::string str() const;
+
+ private:
+  // For various optimisation passes, we check how well they did,
+  // and we record when a particular one beats the others.
+  struct Counts {
+    size_t total_swaps = 0;
+    size_t problems_where_this_was_the_joint_winner = 0;
+    size_t problems_where_this_was_the_clear_winner = 0;
+
+    // Reset this with each new calculated solution; this checks whether
+    // newly calculated solutions really are just a permutation of an existing
+    // solution.
+    std::vector<Swap> sorted_swaps;
+  };
+
+  size_t m_total_lower_bounds = 0;
+  size_t m_number_of_problems = 0;
+  size_t m_number_of_tokens = 0;
+  SwapList m_swap_list;
+  SwapListOptimiser m_optimiser;
+  std::vector<Counts> m_counts_list;
+  std::string m_name;
+  std::string m_prev_tsa_name;
+
+  enum class AllowEmptySwaps { YES, NO };
+
+  // Check that the swaps currently stored in m_swap_list are correct,
+  // and store the data in m_counts_list (if the index is not too big).
+  void check_solution(
+      size_t counts_list_index, VertexMapping vertex_mapping,
+      size_t lower_bound, AllowEmptySwaps allow_empty_swaps);
+
+  // Check that the swaps currently stored in m_swap_list are correct.
+  // Check also that they are a reordering of those
+  // already calculated and stored in m_counts_list, at the given index.
+  void check_equivalent_good_solution(
+      size_t existing_index, VertexMapping vertex_mapping,
+      AllowEmptySwaps allow_empty_swaps);
+
+  // In m_counts_list, the number of swaps for index1 should be <= the
+  // number for index2.
+  void test_order(size_t index1, size_t index2) const;
+
+  void complete_counts_list_for_single_problem();
+};
+
+} // namespace tests
+} // namespace tsa_internal
+} // namespace tket
diff --git a/tket/tests/TokenSwapping/TestUtils/GetRandomSet.cpp b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.cpp
new file mode 100644
index 0000000000..796cdfc6e7
--- /dev/null
+++ b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.cpp
@@ -0,0 +1,54 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "GetRandomSet.hpp"
+
+#include <numeric>
+
+#include "Utils/Assert.hpp"
+
+namespace tket {
+namespace tsa_internal {
+namespace tests {
+
+std::set<size_t> get_random_set(
+    RNG& rng, size_t sample_size, size_t population_size) {
+  TKET_ASSERT(sample_size <= population_size);
+
+  std::set<size_t> result;
+  if (sample_size == 0 || population_size == 0) {
+    return result;
+  }
+  if (sample_size < population_size / 2) {
+    while (result.size() < sample_size) {
+      result.insert(rng.get_size_t(population_size - 1));
+    }
+    return result;
+  }
+  std::vector<size_t> elems(population_size);
+  std::iota(elems.begin(), elems.end(), 0);
+  rng.do_shuffle(elems);
+  for (const auto& elem : elems) {
+    result.insert(elem);
+    if (result.size() == sample_size) {
+      return result;
+    }
+  }
+  TKET_ASSERT(!"get_random_set: dropped out of loop");
+  return result;
+}
+
+} // namespace tests
+} // namespace tsa_internal
+} // namespace tket
diff --git a/tket/tests/TokenSwapping/TestUtils/GetRandomSet.hpp b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.hpp
new file mode 100644
index 0000000000..1215a92657
--- /dev/null
+++ b/tket/tests/TokenSwapping/TestUtils/GetRandomSet.hpp
@@ -0,0 +1,37 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <set>
+
+#include "Utils/RNG.hpp"
+
+namespace tket {
+namespace tsa_internal {
+namespace tests {
+
+/** Return a random subset of given size from the population
+ * {0,1,2,...,population_size-1}.
+ * @param rng A random number generator.
+ * @param sample_size The desired size of the returned set.
+ * @param population_size The number of elements in the population (an interval
+ * of nonnegative integers, starting at 0).
+ * @return A set of numbers. Throws upon invalid parameters.
+ */
+std::set<size_t> get_random_set(
+    RNG& rng, size_t sample_size, size_t population_size);
+
+} // namespace tests
+} // namespace tsa_internal
+} // namespace tket
diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp
new file mode 100644
index 0000000000..0a6764ceca
--- /dev/null
+++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.cpp
@@ -0,0 +1,149 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "PartialTsaTesting.hpp" + +#include + +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" +#include "TestStatsStructs.hpp" +#include "TokenSwapping/DistanceFunctions.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/VertexSwapResult.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// Also checks if an empty token pair swap occurs. +static size_t get_recalculated_final_L( + VertexMapping problem, const SwapList& swap_list, + DistancesInterface& distances, TokenOption token_option) { + bool empty_tok_swap = false; + + for (auto id_opt = swap_list.front_id(); id_opt; + id_opt = swap_list.next(id_opt)) { + const auto swap = swap_list.at(id_opt.value()); + const VertexSwapResult swap_result(swap, problem); + if (swap_result.tokens_moved == 0 && + token_option == TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP) { + empty_tok_swap = true; + } + } + if (empty_tok_swap) { + REQUIRE(false); + } + return get_total_home_distances(problem, distances); +} + +static void check_progress( + size_t init_L, size_t final_L, RequiredTsaProgress progress) { + REQUIRE(final_L <= init_L); + switch (progress) { + case RequiredTsaProgress::FULL: + REQUIRE(final_L == 0); + return; + case RequiredTsaProgress::NONZERO: + if (init_L > 0) { + REQUIRE(final_L < init_L); + } + // Fall through + case RequiredTsaProgress::NONE: + return; + default: + REQUIRE(false); + } +} + +static std::string run_tests( + const std::vector& problems, DistancesInterface& distances, + NeighboursInterface& neighbours, RiverFlowPathFinder& path_finder, + PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + TokenOption token_option) { + REQUIRE(!problems.empty()); + PartialTsaStatistics statistics; + SwapList swap_list; + + for (const auto& problem : problems) { + const auto init_L = get_total_home_distances(problem, distances); + swap_list.clear(); + + // Will be destructively altered + auto problem_copy = problem; + path_finder.reset(); + partial_tsa.append_partial_solution( + swap_list, problem_copy, distances, neighbours, path_finder); + + const auto final_L = get_total_home_distances(problem_copy, distances); + check_progress(init_L, final_L, progress); + + REQUIRE( + get_recalculated_final_L(problem, swap_list, distances, token_option) == + final_L); + + statistics.add_problem_result( + init_L, final_L, problem.size(), swap_list.size()); + } + std::stringstream ss; + ss << "[TSA=" << partial_tsa.name(); + switch (progress) { + case RequiredTsaProgress::FULL: + ss << " FULL"; + break; + + case RequiredTsaProgress::NONZERO: + ss << " NONZERO"; + break; + + // Fall through + case RequiredTsaProgress::NONE: + default: + break; + } + ss << " PF=RiverFlow\n" << statistics.str(problems.size()) << "]"; + return ss.str(); +} + +std::string run_tests( + const ArchitectureMapping& arch_mapping, + const std::vector& problems, + RiverFlowPathFinder& path_finder, PartialTsaInterface& partial_tsa, + RequiredTsaProgress progress, TokenOption token_option) { + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + return run_tests( + problems, distances, neighbours, path_finder, partial_tsa, progress, + token_option); +} + +std::string run_tests( + const ArchitectureMapping& arch_mapping, + const std::vector& problems, RNG& rng, + PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + TokenOption token_option) { + DistancesFromArchitecture 
distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RiverFlowPathFinder path_finder(distances, neighbours, rng); + + return run_tests( + problems, distances, neighbours, path_finder, partial_tsa, progress, + token_option); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp new file mode 100644 index 0000000000..208b44d1f0 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/PartialTsaTesting.hpp @@ -0,0 +1,49 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Architecture/ArchitectureMapping.hpp" +#include "TokenSwapping/PartialTsaInterface.hpp" +#include "Utils/RNG.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +enum class RequiredTsaProgress { NONE, FULL, NONZERO }; +enum class TokenOption { + ALLOW_EMPTY_TOKEN_SWAP, + DO_NOT_ALLOW_EMPTY_TOKEN_SWAP +}; + +/// Returns a summary string of the results, as well as doing the checks. +std::string run_tests( + const ArchitectureMapping& arch_mapping, + const std::vector& problems, + RiverFlowPathFinder& path_finder, PartialTsaInterface& partial_tsa, + RequiredTsaProgress progress, + TokenOption token_option = TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP); + +/// If no path finder is specified, will use the RiverFlowPathFinder +/// (which needs an RNG). +std::string run_tests( + const ArchitectureMapping& arch_mapping, + const std::vector& problems, RNG& rng, + PartialTsaInterface& partial_tsa, RequiredTsaProgress progress, + TokenOption token_option = TokenOption::DO_NOT_ALLOW_EMPTY_TOKEN_SWAP); + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp new file mode 100644 index 0000000000..2e2012aeae --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.cpp @@ -0,0 +1,129 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ProblemGeneration.hpp" + +#include + +#include "GetRandomSet.hpp" +#include "TokenSwapping/GeneralFunctions.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +TSProblemParameters00::TSProblemParameters00() + : token_density_percentage(10), + min_number_of_tokens(1), + max_number_of_tokens(10000) {} + +VertexMapping TSProblemParameters00::get_problem( + RNG& rng, unsigned number_of_vertices) const { + unsigned number_of_tokens = + (token_density_percentage * number_of_vertices) / 100; + number_of_tokens = std::max(number_of_tokens, min_number_of_tokens); + number_of_tokens = std::min(number_of_tokens, number_of_vertices); + number_of_tokens = std::min(number_of_tokens, max_number_of_tokens); + + VertexMapping vertex_mapping; + const auto tokens = get_random_set(rng, number_of_tokens, number_of_vertices); + const auto targets_set = + get_random_set(rng, number_of_tokens, number_of_vertices); + REQUIRE(tokens.size() == number_of_tokens); + REQUIRE(targets_set.size() == number_of_tokens); + vector targets{targets_set.cbegin(), targets_set.cend()}; + for (auto token : tokens) { + vertex_mapping[token] = rng.get_and_remove_element(targets); + } + REQUIRE(targets.empty()); + REQUIRE(vertex_mapping.size() == number_of_tokens); + return vertex_mapping; +} + +ProblemGenerator00::ProblemGenerator00() + : init_token_density_percentage(1), final_percentage(100), step(1) {} + +vector ProblemGenerator00::get_problems( + const std::string& arch_name, unsigned number_of_vertices, RNG& rng, + // It will calculate a short summary string of the problems + // and check against this string; this helps to detect + // accidentally changed parameters/generation algorithms + // leading to different tests. + const std::string& expected_summary) const { + REQUIRE(step > 0); + + TSProblemParameters00 params; + vector vertex_mappings; + + // This will probably detect if the rng changes, or has different seed + auto code = rng.get_size_t(255); + unsigned tokens_count = 0; + for (params.token_density_percentage = init_token_density_percentage; + params.token_density_percentage <= final_percentage; + params.token_density_percentage += step) { + vertex_mappings.push_back(params.get_problem(rng, number_of_vertices)); + tokens_count += vertex_mappings.back().size(); + } + code = (code << 8) + rng.get_size_t(255); + std::stringstream ss; + ss << "[" << arch_name << ": " << code << ": v" << number_of_vertices << " i" + << init_token_density_percentage << " f" << final_percentage << " s" + << step << ": " << vertex_mappings.size() << " problems; " << tokens_count + << " tokens]"; + CHECK(ss.str() == expected_summary); + return vertex_mappings; +} + +RandomTreeGenerator00::RandomTreeGenerator00() + : min_number_of_children(1), + max_number_of_children(3), + approx_number_of_vertices(10) {} + +// Creates the edges of a random tree with vertex 0 being the root. +vector> RandomTreeGenerator00::get_tree_edges( + RNG& rng) const { + REQUIRE(max_number_of_children > min_number_of_children); + REQUIRE(max_number_of_children > 1); + REQUIRE(approx_number_of_vertices >= 3); + // The vertices awaiting child nodes to be assigned. 
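+  // Initially this is just the root, vertex 0.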
+ work_vector.resize(1); + work_vector[0] = 0; + + vector> edges; + for (auto infinite_loop_guard = 100 + 100 * approx_number_of_vertices; + infinite_loop_guard > 0; --infinite_loop_guard) { + const auto number_of_children = + rng.get_size_t(min_number_of_children, max_number_of_children); + const unsigned node = rng.get_and_remove_element(work_vector); + for (unsigned ii = 0; ii < number_of_children; ++ii) { + const unsigned new_vertex = edges.size() + 1; + work_vector.push_back(new_vertex); + edges.emplace_back(node, new_vertex); + if (edges.size() + 1 >= approx_number_of_vertices) { + return edges; + } + } + if (work_vector.empty()) { + return edges; + } + } + REQUIRE(false); + return edges; +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp new file mode 100644 index 0000000000..a84e41ad42 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/ProblemGeneration.hpp @@ -0,0 +1,78 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Architecture/Architecture.hpp" +#include "TokenSwapping/VertexMappingFunctions.hpp" +#include "Utils/RNG.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +struct TSProblemParameters00 { + // How many tokens are there, as a percentage of the number of vertices? + // Will still work if above 100, just gets truncated to 100%. + unsigned token_density_percentage; + + // For very small graphs, ensure a minimum number of tokens. + unsigned min_number_of_tokens; + unsigned max_number_of_tokens; + + TSProblemParameters00(); + + // Using the above problem parameters + VertexMapping get_problem(RNG& rng, unsigned number_of_vertices) const; +}; + +// Given an architecture, generate various test problems +// with varying numbers of tokens. +struct ProblemGenerator00 { + unsigned init_token_density_percentage; + unsigned final_percentage; + unsigned step; + + ProblemGenerator00(); + + std::vector get_problems( + const std::string& arch_name, unsigned number_of_vertices, RNG& rng, + // It will calculate a short summary string of the problems + // and check against this string; this helps to detect + // accidentally changed parameters/generation algorithms + // leading to different tests. + const std::string& expected_summary) const; +}; + +struct RandomTreeGenerator00 { + // Every finite tree must have a leaf! + // So, some vertices will end up being leaves (having no children), + // even if the min is nonzero. + unsigned min_number_of_children; + unsigned max_number_of_children; + unsigned approx_number_of_vertices; + mutable std::vector work_vector; + + RandomTreeGenerator00(); + + // Creates the edges of a random tree with vertices {0,1,2,...} with + // vertex 0 being the root. + // It might not find exactly the requested number of vertices. 
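+  // (It may return fewer vertices if every pending node happens to be
+  // assigned zero children before the target is reached.)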
+ // Note that (number of vertices) == (number of edges+1), for a tree. + std::vector> get_tree_edges(RNG& rng) const; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp new file mode 100644 index 0000000000..7f45186ba7 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.cpp @@ -0,0 +1,70 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "TestStatsStructs.hpp" + +#include +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +void MinMaxAv::add(size_t result) { + min = std::min(min, result); + max = std::max(max, result); + total += result; +} + +void PartialTsaStatistics::add_problem_result( + size_t initial_L, size_t final_L, size_t tokens, size_t swaps) { + REQUIRE(final_L <= initial_L); + REQUIRE(final_L + 2 * swaps >= initial_L); + total_number_of_tokens += tokens; + if (initial_L == 0) { + CHECK(swaps == 0); + l_decrease_percentages.add(100); + powers.add(100); + return; + } + ++number_of_problems; + total_of_L += initial_L; + const size_t l_decrease = initial_L - final_L; + total_of_L_decreases += l_decrease; + + l_decrease_percentages.add((100 * (initial_L - final_L)) / initial_L); + total_number_of_swaps += swaps; + if (swaps == 0) { + powers.add(0); + } else { + powers.add((50 * l_decrease) / swaps); + } +} + +std::string PartialTsaStatistics::str(size_t number_of_problems) const { + REQUIRE(number_of_problems != 0); + std::stringstream ss; + ss << total_number_of_tokens << " tokens; " << total_of_L << " total L; " + << total_number_of_swaps << " swaps.\nL-decr %: min " + << l_decrease_percentages.min << ", max " << l_decrease_percentages.max + << ", av " << l_decrease_percentages.total / number_of_problems + << ".\nPower %: min " << powers.min << ", max " << powers.max << ", av " + << powers.total / number_of_problems; + return ss.str(); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp new file mode 100644 index 0000000000..efaaedd429 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/TestStatsStructs.hpp @@ -0,0 +1,54 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include + +namespace tket { +namespace tsa_internal { +namespace tests { + +struct MinMaxAv { + size_t min = std::numeric_limits::max(); + size_t max = 0; + size_t total = 0; + + void add(size_t result); +}; + +struct PartialTsaStatistics { + size_t number_of_problems = 0; + size_t total_of_L = 0; + size_t total_of_L_decreases = 0; + size_t total_number_of_tokens = 0; + size_t total_number_of_swaps = 0; + + MinMaxAv l_decrease_percentages; + + // The "power" of a swap sequence (with given token configuration) + // is defined to be (decrease in L)/(number of swaps). + // Thus, it's always between 0 and 2 (if all swaps make progress). + // However, we multiply by 50, to make the power between 0 and 100%. + MinMaxAv powers; + + void add_problem_result( + size_t initial_L, size_t final_L, size_t tokens, size_t swaps); + + std::string str(size_t number_of_problems) const; +}; + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/TestUtils/test_DebugFunctions.cpp b/tket/tests/TokenSwapping/TestUtils/test_DebugFunctions.cpp new file mode 100644 index 0000000000..47d89eefa6 --- /dev/null +++ b/tket/tests/TokenSwapping/TestUtils/test_DebugFunctions.cpp @@ -0,0 +1,44 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "DebugFunctions.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("debug functions - string functions") { + const VertexMapping vm{{0, 1}, {1, 2}, {3, 5}}; + CHECK(str(vm) == "VM: 0->1 1->2 3->5 "); + + vector swaps_vect; + swaps_vect.push_back(get_swap(111, 222)); + swaps_vect.push_back(get_swap(5555, 4444)); + const auto swaps_vect_str = str(swaps_vect); + CHECK(swaps_vect_str == " (111,222) (4444,5555) "); + + SwapList swaps; + for (const auto& swap : swaps_vect) { + swaps.push_back(swap); + } + CHECK(swaps_vect_str == str(swaps)); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp new file mode 100644 index 0000000000..7c83c83152 --- /dev/null +++ b/tket/tests/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp @@ -0,0 +1,109 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include + +#include "Architecture/ArchitectureMapping.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Simple path") { + const vector> edges{ + {111, 222}, {555, 444}, {333, 222}, {777, 666}, {333, 444}, {666, 555}}; + const unsigned n_verts = edges.size() + 1; + std::stringstream ss; + ss << "Original input edges:\n"; + for (auto edge : edges) { + ss << "(" << edge.first << "," << edge.second << ") "; + } + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + + ss << "...\nEdges from arch.mapping:\n"; + for (auto edge : arch_mapping.get_edges()) { + ss << "(" << edge.first << "," << edge.second << ") "; + } + ss << "...\nVertex-to-node:"; + + for (unsigned vv = 0; vv < n_verts; ++vv) { + const auto node = arch_mapping.get_node(vv); + REQUIRE(vv == arch_mapping.get_vertex(node)); + ss << "\n" << vv << " == " << node.repr(); + } + ss << "...\nDistances:"; + + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + + for (unsigned ii = 0; ii < n_verts; ++ii) { + ss << "\n" << ii << ": ["; + for (unsigned jj = ii + 1; jj < n_verts; ++jj) { + REQUIRE(0 == distances(ii, ii)); + const auto dist = distances(ii, jj); + ss << " " << dist; + REQUIRE(dist == distances(jj, ii)); + } + ss << "]"; + } + ss << "\nNeighbours:"; + for (unsigned ii = 0; ii < n_verts; ++ii) { + ss << "\n" << ii << ": ["; + const auto& neighb = neighbours(ii); + for (auto nn : neighb) { + ss << " " << nn; + } + ss << " ]"; + } + CHECK( + ss.str() == + "Original input edges:\n" + "(111,222) (555,444) (333,222) (777,666) (333,444) (666,555) ...\n" + "Edges from arch.mapping:\n" + "(0,1) (2,3) (1,4) (5,6) (3,4) (2,6) ...\n" + "Vertex-to-node:\n" + "0 == node[111]\n" + "1 == node[222]\n" + "2 == node[555]\n" + "3 == node[444]\n" + "4 == node[333]\n" + "5 == node[777]\n" + "6 == node[666]...\n" + "Distances:\n" + "0: [ 1 4 3 2 6 5]\n" + "1: [ 3 2 1 5 4]\n" + "2: [ 1 2 2 1]\n" + "3: [ 1 3 2]\n" + "4: [ 4 3]\n" + "5: [ 1]\n" + "6: []\n" + "Neighbours:\n" + "0: [ 1 ]\n" + "1: [ 0 4 ]\n" + "2: [ 3 6 ]\n" + "3: [ 2 4 ]\n" + "4: [ 1 3 ]\n" + "5: [ 6 ]\n" + "6: [ 2 5 ]"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp new file mode 100644 index 0000000000..1bcd0ca242 --- /dev/null +++ b/tket/tests/TokenSwapping/test_BestTsaFixedSwapSequences.cpp @@ -0,0 +1,386 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include + +#include "Data/FixedCompleteSolutions.hpp" +#include "Data/FixedSwapSequences.hpp" +#include "TestUtils/BestTsaTester.hpp" + +/// TODO: The swap table optimiser currently tries to optimise many segments; +/// certainly it could be cut down, experimentation is needed +/// to find how much to cut it down, without degrading solution +/// quality too much. +// + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct FixedSeqsStats { + size_t equivalent_solns = 0; + size_t equivalent_solns_swaps = 0; + size_t better_solns = 0; + size_t better_solns_swaps = 0; + size_t better_solns_known_swaps = 0; + size_t better_solns_total_swap_diff = 0; + size_t better_solns_percent_decr_total = 0; + size_t worse_solns = 0; + size_t worse_solns_swaps = 0; + size_t worse_solns_known_swaps = 0; + size_t worse_solns_total_swap_diff = 0; + size_t worse_solns_percent_incr_total = 0; + + void add(size_t known_size, size_t calc_size) { + if (known_size == calc_size) { + ++equivalent_solns; + equivalent_solns_swaps += known_size; + return; + } + if (calc_size < known_size) { + ++better_solns; + better_solns_swaps += calc_size; + better_solns_known_swaps += known_size; + const auto decr = known_size - calc_size; + better_solns_total_swap_diff += decr; + better_solns_percent_decr_total += (decr * 100) / known_size; + return; + } + ++worse_solns; + worse_solns_swaps += calc_size; + worse_solns_known_swaps += known_size; + const auto incr = calc_size - known_size; + worse_solns_total_swap_diff += incr; + worse_solns_percent_incr_total += (incr * 100) / known_size; + } + + std::string str() const { + std::stringstream ss; + size_t good_soln_av_decr = 0; + if (better_solns > 0) { + good_soln_av_decr = better_solns_percent_decr_total / better_solns; + } + size_t bad_soln_av_incr = 0; + if (worse_solns > 0) { + bad_soln_av_incr = worse_solns_percent_incr_total / worse_solns; + } + + ss << "[" << equivalent_solns << " equal (" << equivalent_solns_swaps + << "); " << better_solns << " BETTER (" << better_solns_swaps << " vs " + << better_solns_known_swaps << "): av " << good_soln_av_decr + << "% decr\n" + << worse_solns << " WORSE (" << worse_solns_swaps << " vs " + << worse_solns_known_swaps << "): av " << bad_soln_av_incr << "% incr]"; + return ss.str(); + } +}; +} // namespace + +static void check_overall_percentage_improvement( + unsigned total_number_of_problems, unsigned total_calc_swaps, + unsigned total_orig_swaps, double expected_percentage) { + const double actual_decrease = + 100.0 - (100.0 * total_calc_swaps) / (double)total_orig_swaps; + if (std::abs(actual_decrease - expected_percentage) < 1e-4) { + return; + } + INFO( + "Solved " << total_number_of_problems + << " problems; known solutions have total swaps " + << total_orig_swaps << ". We calculated " << total_calc_swaps + << ", giving percentage decrease " << actual_decrease + << ". 
But we expected " << expected_percentage); + CHECK(false); +} + +namespace { +struct Summary { + std::string str; + unsigned total_calc_swaps; + unsigned total_orig_swaps; + unsigned total_number_of_problems; + + Summary( + const vector& encoded_swap_sequences, BestTsaTester& tester) + : total_calc_swaps(0), total_orig_swaps(0), total_number_of_problems(0) { + FixedSeqsStats stats; + for (const auto& code_str : encoded_swap_sequences) { + const DecodedProblemData data(code_str); + const auto known_size = data.swaps.size(); + REQUIRE(known_size > 0); + try { + const auto calc_soln_size = tester.get_checked_solution_size(data); + stats.add(known_size, calc_soln_size); + total_calc_swaps += calc_soln_size; + total_orig_swaps += known_size; + ++total_number_of_problems; + } catch (const std::exception& e) { + INFO( + "Swap seq encoding string '" + << code_str << "'\n...encoded " << data.swaps.size() << " swaps, " + << data.vertex_mapping.size() << " tokens on " + << data.number_of_vertices + << " vertices. Gave error: " << e.what()); + REQUIRE(false); + } + } + str = stats.str(); + } + + void check_overall_improvement(double expected_percentage) const { + check_overall_percentage_improvement( + total_number_of_problems, total_calc_swaps, total_orig_swaps, + expected_percentage); + } +}; +} // namespace + +SCENARIO("Best TSA : solve problems from fixed swap sequences") { + FixedSwapSequences sequences; + CHECK(sequences.full.size() == 453); + CHECK(sequences.partial.size() == 755); + +#ifdef TKET_TESTS_FULL + // The "long" tests take ~6 seconds on an ordinary 2021 Windows laptop. + const std::string full_seq_str = + "[248 equal (6088); 104 BETTER (4645 vs 4979): av 7% decr\n" + "101 WORSE (5893 vs 5451): av 8% incr]"; + + // The fixed swap sequences have been optimised quite a lot already, + // so are probably quite close to optimal (although we cannot know + // for sure without an exhaustive search; there is probably no known + // non-exponential time algorithm to find the optimal solution). + // So, (probably) getting within 1% of the optimal answer seems pretty good. + const double full_seq_improvement = -0.653832; + + const std::string partial_seq_str = + "[455 equal (6487); 165 BETTER (7044 vs 7457): av 7% decr\n" + "135 WORSE (9124 vs 8604): av 6% incr]"; + const double partial_seq_improvement = -0.474543; +#else + // The reduced tests take ~50 milliseconds + // (and are also biased towards smaller problems, + // as the problem strings are sorted by length). 
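+  // Keep only the first 40 (i.e. the shortest) encoded sequences of each kind.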
+ sequences.full.resize(40); + const std::string full_seq_str = + "[40 equal (231); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]"; + const double full_seq_improvement = 0.0; + + sequences.partial.resize(40); + const std::string partial_seq_str = + "[40 equal (166); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]"; + const double partial_seq_improvement = 0.0; +#endif + + BestTsaTester tester; + const Summary full_seqs_summary(sequences.full, tester); + CHECK(full_seqs_summary.total_number_of_problems == sequences.full.size()); + CHECK(full_seqs_summary.str == full_seq_str); + full_seqs_summary.check_overall_improvement(full_seq_improvement); + + const Summary partial_seqs_summary(sequences.partial, tester); + CHECK( + partial_seqs_summary.total_number_of_problems == + sequences.partial.size()); + CHECK(partial_seqs_summary.str == partial_seq_str); + partial_seqs_summary.check_overall_improvement(partial_seq_improvement); +} + +// Now we want to solve complete problems; this is one of +// our most important tests. It is a bit silly +// to put problems with 5 vertices and problems with +// 50 vertices in the same test. Therefore, we crudely sort by length of +// encoding string, which is roughly "problem size", +// and distribute the final statistics amongst a number of categories +// based upon problem size. +namespace { +class StatisticsGrouper { + public: + StatisticsGrouper( + unsigned number_of_messages, + const vector& sorted_problem_sizes) { + REQUIRE(number_of_messages >= 3); + REQUIRE(sorted_problem_sizes.size() >= 5 * number_of_messages); + REQUIRE(sorted_problem_sizes[0] >= 5); + m_stats.resize(number_of_messages); + m_problem_size_boundaries.resize(number_of_messages); + const unsigned step = sorted_problem_sizes.size() / number_of_messages; + for (unsigned ii = 0; ii + 1 < number_of_messages; ++ii) { + m_problem_size_boundaries[ii] = sorted_problem_sizes[(ii + 1) * step]; + } + m_problem_size_boundaries.back() = sorted_problem_sizes.back() + 1; + } + + void add( + const std::string& problem_str, + const DecodedArchitectureData& arch_data) { + unsigned allowed_index = 0; + for (unsigned index = 0; index < m_problem_size_boundaries.size(); + ++index) { + if (problem_str.size() <= m_problem_size_boundaries[index]) { + allowed_index = index; + break; + } + } + // Now we know which category it's in, so do the calculation + auto& stats = m_stats[allowed_index]; + const DecodedProblemData data( + problem_str, DecodedProblemData::RequireContiguousVertices::NO); + const auto known_size = data.swaps.size(); + REQUIRE(known_size > 0); + try { + const auto calc_soln_size = + m_tester.get_checked_solution_size(data, arch_data); + stats.add(known_size, calc_soln_size); + m_total_calc_swaps += calc_soln_size; + m_total_orig_swaps += known_size; + ++m_total_number_of_problems; + } catch (const std::exception& e) { + INFO( + "Swap seq encoding string '" << problem_str << "'\n...encoded " + << data.swaps.size() + << " swaps, error: " << e.what()); + CHECK(false); + } + } + + vector get_final_messages() const { + vector messages(m_stats.size()); + for (unsigned ii = 0; ii < m_stats.size(); ++ii) { + messages[ii] = m_stats[ii].str(); + } + return messages; + } + + void check_overall_improvement(double expected_percentage) const { + check_overall_percentage_improvement( + m_total_number_of_problems, m_total_calc_swaps, m_total_orig_swaps, + expected_percentage); + } + + private: + unsigned m_total_calc_swaps = 0; + unsigned m_total_orig_swaps = 0; + unsigned 
m_total_number_of_problems = 0; + BestTsaTester m_tester; + vector m_stats; + vector m_problem_size_boundaries; +}; +} // namespace + +SCENARIO("Best TSA : solve complete problems") { + FixedCompleteSolutions complete_solutions; + + // It's a map, with key the architecture name; this is the number + // of architectures, not problems. + CHECK(complete_solutions.solutions.size() == 21); + vector sizes; + for (const auto& entry : complete_solutions.solutions) { + sizes.push_back(entry.second.size()); + } + CHECK(sizes == vector{49, 97, 49, 49, 97, 93, 45, 45, 45, 39, 41, + 49, 39, 100, 48, 28, 22, 27, 49, 49, 38}); + + // For a good test, very different problems should not be amalgamated + // in the statistics. Thus we determine the different categories using length + // of encoding string, which presumably roughly corresponds to "problem size" + // and problem hardness. + +#ifdef TKET_TESTS_FULL + // The "long" tests take ~12 seconds on an ordinary 2021 Windows laptop. + const vector expected_messages{ + "[210 equal (1018); 19 BETTER (84 vs 111): av 24% decr\n" + "2 WORSE (19 vs 15): av 26% incr]", + + "[145 equal (1822); 39 BETTER (451 vs 525): av 13% decr\n" + "17 WORSE (269 vs 242): av 11% incr]", + + "[58 equal (1619); 122 BETTER (3465 vs 3832): av 9% decr\n" + "34 WORSE (1321 vs 1232): av 6% incr]", + + "[18 equal (1382); 114 BETTER (8322 vs 8856): av 5% decr\n" + "83 WORSE (6875 vs 6457): av 5% incr]", + + "[8 equal (1470); 164 BETTER (25183 vs 27141): av 6% decr\n" + "44 WORSE (8722 vs 8384): av 3% incr]"}; + + const double expected_improvement = 3.25087; +#else + // The reduced tests take ~700 milliseconds. + for (auto& entry : complete_solutions.solutions) { + auto reduced_size = entry.second.size() / 10; + if (reduced_size < 4) { + reduced_size = 4; + } + if (reduced_size < entry.second.size()) { + entry.second.resize(reduced_size); + } + } + const vector expected_messages{ + "[18 equal (62); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", + + "[17 equal (82); 0 BETTER (0 vs 0): av 0% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", + + "[12 equal (119); 2 BETTER (15 vs 18): av 16% decr\n" + "0 WORSE (0 vs 0): av 0% incr]", + + "[6 equal (149); 6 BETTER (164 vs 173): av 5% decr\n" + "4 WORSE (115 vs 110): av 5% incr]", + + "[4 equal (163); 10 BETTER (535 vs 571): av 5% decr\n" + "5 WORSE (288 vs 273): av 5% incr]"}; + const double expected_improvement = 1.62791; +#endif + + vector problem_sizes; + for (const auto& entry : complete_solutions.solutions) { + REQUIRE(entry.second.size() >= 2); + // The first string encodes the edges in that architecture, + // rather than a problem. 
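+    // Hence the problem strings start at index 1 below.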
+ for (unsigned ii = 1; ii < entry.second.size(); ++ii) { + problem_sizes.push_back(entry.second[ii].size()); + } + } + std::sort(problem_sizes.begin(), problem_sizes.end()); + StatisticsGrouper grouper(expected_messages.size(), problem_sizes); + + // Now go through the problems, let the grouper object collate the stats + // appropriately + for (const auto& entry : complete_solutions.solutions) { + const DecodedArchitectureData arch_data(entry.second[0]); + for (unsigned ii = 1; ii < entry.second.size(); ++ii) { + grouper.add(entry.second[ii], arch_data); + } + } + const auto calc_messages = grouper.get_final_messages(); + REQUIRE(calc_messages.size() == expected_messages.size()); + for (unsigned ii = 0; ii < calc_messages.size(); ++ii) { + INFO("for message[" << ii << "]: "); + CHECK(calc_messages[ii] == expected_messages[ii]); + } + // A positive result is good; the fixed complete problems are DIRECTLY + // comparing our TSA with the solver used to generate them. + grouper.check_overall_improvement(expected_improvement); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp new file mode 100644 index 0000000000..ac62c2a1cb --- /dev/null +++ b/tket/tests/TokenSwapping/test_DistancesFromArchitecture.cpp @@ -0,0 +1,80 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "Architecture/DistancesFromArchitecture.hpp" + +using Catch::Matchers::Contains; +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +SCENARIO("Architecture with disconnected graph") { + // Check that distance(v1, v2) does indeed give an error if v1, v2 are in + // different connected components. + const std::vector> edges{ + {0, 1}, {0, 2}, {1, 3}, {4, 5}}; + const size_t number_of_vertices = 6; + const Architecture arch(edges); + // Note: it's a "coincidence" that the vertex numbers are unchanged, + // because 0,1,2,3,4,5 are first seen in this order. + const ArchitectureMapping mapping(arch, edges); + REQUIRE(mapping.number_of_vertices() == number_of_vertices); + DistancesFromArchitecture dist_calculator(mapping); + std::stringstream summary; + for (size_t v1 = 0; v1 < number_of_vertices; ++v1) { + for (size_t v2 = 0; v2 < number_of_vertices; ++v2) { + summary << "d(" << v1 << "," << v2 << ")="; + try { + const auto distance = dist_calculator(v1, v2); + summary << distance << ";"; + if (distance == 0) { + CHECK(v1 == v2); + } else { + CHECK(v1 != v2); + } + } catch (const std::exception& e) { + // 4 or 5 is involved, but not (4,5). + const bool four_or_five_occurs = + (v1 == 4 || v2 == 4 || v1 == 5 || v2 == 5); + CHECK(four_or_five_occurs); + // ...but not (4,5). 
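+        // Vertices 4 and 5 are joined by an edge, so distance(4,5) never
+        // throws; within {0,...,5}, v1+v2 == 9 only for the pair {4,5}.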
+ CHECK(v1 + v2 != 9); + summary << "INF;"; + const std::string message = e.what(); + CHECK_THAT(message, Contains("are not connected")); + } + } + } + CHECK( + summary.str() == + "d(0,0)=0;d(0,1)=1;d(0,2)=1;d(0,3)=2;d(0,4)=INF;d(0,5)=INF;d(1,0)=1;" + "d(1,1)=0;" + "d(1,2)=2;d(1,3)=1;d(1,4)=INF;d(1,5)=INF;d(2,0)=1;d(2,1)=2;d(2,2)=0;" + "d(2,3)=3;d" + "(2,4)=INF;d(2,5)=INF;d(3,0)=2;d(3,1)=1;d(3,2)=3;d(3,3)=0;d(3,4)=INF;" + "d(3,5)=" + "INF;d(4,0)=INF;d(4,1)=INF;d(4,2)=INF;d(4,3)=INF;d(4,4)=0;d(4,5)=1;d(" + "5,0)=INF;" + "d(5,1)=INF;d(5,2)=INF;d(5,3)=INF;d(5,4)=1;d(5,5)=0;"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_FullTsa.cpp b/tket/tests/TokenSwapping/test_FullTsa.cpp new file mode 100644 index 0000000000..645381da49 --- /dev/null +++ b/tket/tests/TokenSwapping/test_FullTsa.cpp @@ -0,0 +1,259 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "TestUtils/ArchitectureEdgesReimplementation.hpp" +#include "TestUtils/FullTsaTesting.hpp" +#include "TestUtils/ProblemGeneration.hpp" +#include "TokenSwapping/HybridTsa.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct FullTester { + FullTsaTesting results; + FullTsaTesting trivial_results; + HybridTsa full_tsa; + TrivialTSA trivial_tsa; + RNG rng; + ProblemGenerator00 generator; + std::string test_name; + + void add_problems( + const ArchitectureMapping& arch_mapping, const std::string& arch_name, + const std::string& problem_message) { + rng.set_seed(); + const auto problems = generator.get_problems( + arch_name, arch_mapping.number_of_vertices(), rng, problem_message); + + // OK to reuse RNG, as it's reset before each problem. 
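+    // Run the hybrid TSA and the trivial TSA on the same problems,
+    // so that their statistics can be compared directly.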
+ results.add_problems(arch_mapping, problems, test_name, rng, full_tsa); + + trivial_tsa.set(TrivialTSA::Options::FULL_TSA); + trivial_results.add_problems( + arch_mapping, problems, test_name, rng, trivial_tsa); + } + + void add_problems( + const vector>& edges, + const std::string& arch_name, const std::string& problem_message, + unsigned expected_number_of_vertices = 0) { + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + if (expected_number_of_vertices != 0) { + REQUIRE(arch_mapping.number_of_vertices() == expected_number_of_vertices); + } + add_problems(arch_mapping, arch_name, problem_message); + } +}; +} // namespace + +SCENARIO("Full TSA: stars") { + const vector problem_messages{ + "[Star3: 51481: v4 i1 f100 s1: 100 problems; 178 tokens]", + "[Star5: 51528: v6 i1 f100 s1: 100 problems; 270 tokens]", + "[Star10: 51662: v11 i1 f100 s1: 100 problems; 515 tokens]", + "[Star20: 51494: v21 i1 f100 s1: 100 problems; 1015 tokens]"}; + const vector num_spokes{3, 5, 10, 20}; + FullTester tester; + tester.test_name = "Stars"; + std::string arch_name; + vector> edges; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + arch_name = "Star" + std::to_string(num_spokes[index]); + edges.clear(); + for (unsigned vv = 1; vv <= num_spokes[index]; ++vv) { + edges.emplace_back(0, vv); + } + tester.add_problems(edges, arch_name, problem_messages[index]); + } + CHECK( + tester.results.str() == + "[Stars:HybridTsa: 400 probs; 1978 toks; 1623 tot.lb]\n" + "[Total swaps: 2632 2588 2550 2539 2539 2550]\n" + "[Winners: joint: 360 381 392 400 400 392 undisputed: 0 0 0 0 0 0]"); + + CHECK( + tester.trivial_results.str() == + "[Stars:Trivial: 400 probs; 1978 toks; 1623 tot.lb]\n" + "[Total swaps: 3968 3804 3088 3088 3088 3088]\n" + "[Winners: joint: 247 271 400 400 400 400 undisputed: 0 0 0 0 0 0]"); +} + +SCENARIO("Full TSA: wheels") { + const vector problem_messages{ + "[Wheel3: 51481: v4 i1 f100 s1: 100 problems; 178 tokens]", + "[Wheel5: 51528: v6 i1 f100 s1: 100 problems; 270 tokens]", + "[Wheel10: 51662: v11 i1 f100 s1: 100 problems; 515 tokens]", + "[Wheel20: 51494: v21 i1 f100 s1: 100 problems; 1015 tokens]"}; + + const vector num_spokes{3, 5, 10, 20}; + FullTester tester; + tester.test_name = "Wheels"; + std::string arch_name; + vector> edges; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + arch_name = "Wheel" + std::to_string(num_spokes[index]); + edges.clear(); + for (unsigned vv = 1; vv <= num_spokes[index]; ++vv) { + edges.emplace_back(0, vv); + if (vv == num_spokes[index]) { + edges.emplace_back(vv, 1); + } else { + edges.emplace_back(vv, vv + 1); + } + } + tester.add_problems(edges, arch_name, problem_messages[index]); + } + CHECK( + tester.results.str() == + "[Wheels:HybridTsa: 400 probs; 1978 toks; 1533 tot.lb]\n" + "[Total swaps: 2482 2462 2430 2422 2422 2430]\n" + "[Winners: joint: 374 384 395 400 400 395 undisputed: 0 0 0 0 0 0]"); + + CHECK( + tester.trivial_results.str() == + "[Wheels:Trivial: 400 probs; 1978 toks; 1533 tot.lb]\n" + "[Total swaps: 3510 3410 2818 2818 2818 2818]\n" + "[Winners: joint: 283 291 400 400 400 400 undisputed: 0 0 0 0 0 0]"); +} + +SCENARIO("Full TSA: Rings") { + const vector problem_messages{ + "[Ring3: 51582: v3 i1 f100 s1: 100 problems; 135 tokens]", + "[Ring5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[Ring10: 51634: v10 i1 f100 s1: 100 problems; 469 tokens]", + "[Ring20: 51498: v20 i1 f100 s1: 100 problems; 974 tokens]"}; + const vector num_vertices{3, 5, 10, 20}; 
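+  // Each test uses a RingArch, i.e. a single cycle, on the given number of vertices.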
+ FullTester tester; + tester.test_name = "Rings"; + std::string arch_name; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + const RingArch arch(num_vertices[index]); + arch_name = "Ring" + std::to_string(num_vertices[index]); + const ArchitectureMapping arch_mapping(arch); + tester.add_problems(arch_mapping, arch_name, problem_messages[index]); + } + // NOTE: results could change, if RingArch changes vertex labelling + // (outside the control of token swapping). + // However this seems unlikely, since rings are so simple. + // See the comments for "Full TSA: Square Grids" (about + // get_square_grid_edges). + CHECK( + tester.results.str() == + "[Rings:HybridTsa: 400 probs; 1802 toks; 3193 tot.lb]\n" + "[Total swaps: 6302 5942 5118 5115 5113 5118]\n" + "[Winners: joint: 292 328 399 399 400 399 undisputed: 0 0 0 0 1 0]"); + + CHECK( + tester.trivial_results.str() == + "[Rings:Trivial: 400 probs; 1802 toks; 3193 tot.lb]\n" + "[Total swaps: 8922 8580 5104 5087 5079 5104]\n" + "[Winners: joint: 231 252 394 397 400 394 undisputed: 0 0 0 0 3 0]"); +} + +SCENARIO("Full TSA: Square Grids") { + const vector> grid_parameters = { + {2, 2, 2}, {3, 4, 4}}; + const vector problem_messages{ + "[Grid(2,2,2): 51480: v8 i1 f100 s1: 100 problems; 368 tokens]", + "[Grid(3,4,4): 51492: v48 i1 f100 s1: 100 problems; 2378 tokens]"}; + + FullTester tester; + tester.test_name = "Square grids"; + + for (size_t index = 0; index < grid_parameters.size(); ++index) { + const auto& parameters = grid_parameters[index]; + + // NOTE: if we used a SquareGrid architecture object, then results + // could change if SquareGrid and/or Architecture changed in future + // (giving different vertex labels, etc.), + // even if the underlying token swapping algorithm is unchanged. + // + // ArchitectureMapping can resolve these issues IF given the original + // vector of EDGES, in the same order as used to construct Architecture. + // The edge vector used to construct a SquareGrid architecture object + // is not available, so we just construct the edges directly, + // to give a fixed test independent of SquareGrid implementation details. 
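+      // Rough sketch of the kind of construction assumed here (illustration
+      // only; the actual labelling is whatever the test utility chooses):
+      // flatten each coordinate triple (x,y,z), with 0 <= x < parameters[0]
+      // etc., to a single index, e.g.
+      //   index(x,y,z) = z + parameters[2] * (y + parameters[1] * x);
+      // and add an edge between a vertex and its neighbour one step along
+      // each axis whenever that neighbour is still inside the grid.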
+ const auto edges = + get_square_grid_edges(parameters[0], parameters[1], parameters[2]); + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + + std::stringstream ss; + ss << "Grid(" << parameters[0] << "," << parameters[1] << "," + << parameters[2] << ")"; + + tester.add_problems(arch_mapping, ss.str(), problem_messages[index]); + } + + CHECK( + tester.results.str() == + "[Square grids:HybridTsa: 200 probs; 2746 toks; 4323 tot.lb]\n" + "[Total swaps: 7083 7015 6863 6846 6842 6863]\n" + "[Winners: joint: 148 163 188 198 200 188 undisputed: 0 0 0 0 2 0]"); + + CHECK( + tester.trivial_results.str() == + "[Square grids:Trivial: 200 probs; 2746 toks; 4323 tot.lb]\n" + "[Total swaps: 12364 12208 9114 9039 8933 9114]\n" + "[Winners: joint: 85 91 152 177 200 152 undisputed: 0 0 0 0 23 0]"); +} + +SCENARIO("Full TSA: Random trees") { + RandomTreeGenerator00 tree_generator; + FullTester tester; + + const vector problem_messages{ + "[Tree0: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[Tree1: 51517: v16 i1 f100 s1: 100 problems; 766 tokens]", + "[Tree2: 51481: v24 i1 f100 s1: 100 problems; 1168 tokens]"}; + tester.test_name = "Trees"; + std::string arch_name; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + tree_generator.min_number_of_children = index; + tree_generator.max_number_of_children = 2 + 2 * index; + tree_generator.approx_number_of_vertices = + 4 * tree_generator.max_number_of_children; + + const auto edges = tree_generator.get_tree_edges(tester.rng); + arch_name = "Tree" + std::to_string(index); + tester.add_problems( + edges, arch_name, problem_messages[index], edges.size() + 1); + } + CHECK( + tester.results.str() == + "[Trees:HybridTsa: 300 probs; 2158 toks; 2963 tot.lb]\n" + "[Total swaps: 5216 5132 4844 4828 4817 4844]\n" + "[Winners: joint: 227 251 286 296 300 286 undisputed: 0 0 0 0 4 0]"); + + CHECK( + tester.trivial_results.str() == + "[Trees:Trivial: 300 probs; 2158 toks; 2963 tot.lb]\n" + "[Total swaps: 8128 7886 5592 5570 5563 5600]\n" + "[Winners: joint: 128 148 282 297 300 280 undisputed: 0 0 0 0 3 0]"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp new file mode 100644 index 0000000000..5978b885a0 --- /dev/null +++ b/tket/tests/TokenSwapping/test_RiverFlowPathFinder.cpp @@ -0,0 +1,262 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
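+// These tests exercise RiverFlowPathFinder on cycles, a ring architecture
+// and square grids. For every vertex pair they check that the returned path
+// has the right endpoints, no repeated vertices, size equal to the distance
+// plus one, and only valid edges; and they count how often repeated queries
+// for the same pair return a different path ("extra paths"), which is
+// expected to be rare.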
+ +#include +#include + +#include "Architecture/ArchitectureMapping.hpp" +#include "Architecture/DistancesFromArchitecture.hpp" +#include "Architecture/NeighboursFromArchitecture.hpp" +#include "TestUtils/ArchitectureEdgesReimplementation.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +// It is a cycle (ring) on vertices [0,1,2,..., N-1], with N ~ 0. +struct DistancesForCycle : public DistancesInterface { + size_t number_of_vertices = 10; + + virtual size_t operator()(size_t v1, size_t v2) override { + size_t distance1; + if (v1 < v2) { + distance1 = v2 - v1; + } else { + distance1 = v1 - v2; + } + const size_t distance2 = number_of_vertices - distance1; + return std::min(distance1, distance2); + } +}; + +class NeighboursForCycle : public NeighboursInterface { + public: + explicit NeighboursForCycle(size_t number_of_vertices) + : m_number_of_vertices(number_of_vertices) { + REQUIRE(number_of_vertices > 1); + if (m_number_of_vertices == 2) { + m_neighbours.resize(1); + } else { + m_neighbours.resize(2); + } + } + + virtual const vector& operator()(size_t vertex) override { + if (vertex >= m_number_of_vertices) { + throw std::runtime_error("neighbours requested for invalid vertex"); + } + m_neighbours[0] = (vertex + 1) % m_number_of_vertices; + if (m_neighbours.size() > 1) { + m_neighbours[1] = + ((vertex + m_number_of_vertices) - 1) % m_number_of_vertices; + } + return m_neighbours; + } + + private: + size_t m_number_of_vertices; + vector m_neighbours; +}; + +struct TestResult { + size_t total_number_of_path_calls = 0; + size_t total_number_of_differing_extra_paths = 0; + + std::string str() const { + std::stringstream ss; + ss << "[ Number of path calls: " << total_number_of_path_calls + << " Extra paths: " << total_number_of_differing_extra_paths << " ]"; + return ss.str(); + } +}; + +} // namespace + +static void do_simple_path_test( + const vector& path, const Swap& endpoints) { + REQUIRE(!path.empty()); + REQUIRE(path[0] == endpoints.first); + REQUIRE(path.back() == endpoints.second); + + const std::set vertices{path.cbegin(), path.cend()}; + REQUIRE(vertices.size() == path.size()); +} + +static void require_path_to_have_valid_edges( + const vector& path, NeighboursInterface& neighbours_interface) { + std::array vertices; + for (size_t ii = 0; ii + 1 < path.size(); ++ii) { + vertices[0].first = path[ii]; + vertices[0].second = path[ii + 1]; + vertices[1].first = path[ii + 1]; + vertices[1].second = path[ii]; + for (const auto& pair : vertices) { + const auto& neighbours = neighbours_interface(pair.first); + bool is_neighbour = false; + for (auto neigh : neighbours) { + if (neigh == pair.second) { + is_neighbour = true; + break; + } + } + REQUIRE(is_neighbour); + } + } +} + +static void test( + TestResult& result, RiverFlowPathFinder& path_finder, + DistancesInterface& distance_calculator, + NeighboursInterface& neighbours_calculator, size_t number_of_vertices, + RNG& rng_for_test_data, size_t number_of_test_repeats = 10) { + // We will check that calculated paths are mostly unchanged. 
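+  // For every ordered pair (v1,v2) we record all distinct paths the path
+  // finder has ever returned for it (a map from the pair to a list of
+  // vertex paths). Repeated calls should almost always return a path seen
+  // before; each genuinely new path for a pair that already had one is
+  // counted as an "extra path" in the result.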
+ std::map>> calculated_paths; + + vector possible_path_calls; + possible_path_calls.reserve(number_of_vertices * number_of_vertices); + for (size_t ii = 0; ii < number_of_vertices; ++ii) { + for (size_t jj = 0; jj < number_of_vertices; ++jj) { + possible_path_calls.emplace_back(ii, jj); + calculated_paths[std::make_pair(ii, jj)]; + } + } + + // The first time a path is calculated, its length will be checked using + // the distance_calculator + const auto get_path_size = [&calculated_paths, &distance_calculator]( + const Swap& end_vertices) -> size_t { + if (end_vertices.first == end_vertices.second) { + return 1; + } + const auto& existing_paths = calculated_paths[end_vertices]; + if (!existing_paths.empty()) { + return existing_paths[0].size(); + } + const auto& reversed_existing_paths = calculated_paths[std::make_pair( + end_vertices.second, end_vertices.first)]; + + if (!reversed_existing_paths.empty()) { + return reversed_existing_paths[0].size(); + } + return 1 + distance_calculator(end_vertices.first, end_vertices.second); + }; + + for (size_t counter = number_of_test_repeats; counter > 0; --counter) { + rng_for_test_data.do_shuffle(possible_path_calls); + result.total_number_of_path_calls += possible_path_calls.size(); + + for (const Swap& end_vertices : possible_path_calls) { + const auto& calc_path = + path_finder(end_vertices.first, end_vertices.second); + + do_simple_path_test(calc_path, end_vertices); + REQUIRE(calc_path.size() == get_path_size(end_vertices)); + + auto& path_list = calculated_paths[end_vertices]; + bool found_path = false; + for (auto& path : path_list) { + if (path == calc_path) { + found_path = true; + break; + } + } + if (!found_path) { + if (!path_list.empty()) { + ++result.total_number_of_differing_extra_paths; + } + path_list.emplace_back(calc_path); + require_path_to_have_valid_edges(calc_path, neighbours_calculator); + } + } + } +} + +SCENARIO("Test path generation for cycles") { + RNG rng_for_path_generation; + RNG rng_for_test_data; + DistancesForCycle distances; + TestResult result; + + for (size_t number_of_vertices = 2; number_of_vertices <= 10; + ++number_of_vertices) { + INFO("number_of_vertices = " << number_of_vertices); + distances.number_of_vertices = number_of_vertices; + NeighboursForCycle neighbours(number_of_vertices); + RiverFlowPathFinder path_finder( + distances, neighbours, rng_for_path_generation); + + const auto current_differing_paths = + result.total_number_of_differing_extra_paths; + test( + result, path_finder, distances, neighbours, number_of_vertices, + rng_for_test_data); + + // Even cycles have non-unique paths, for polar opposite vertices; + // odd cycles do not. + if (number_of_vertices % 2 == 1) { + // No extra paths were created. + CHECK( + current_differing_paths == + result.total_number_of_differing_extra_paths); + } + } + REQUIRE(result.str() == "[ Number of path calls: 3840 Extra paths: 3 ]"); +} + +// Deliberately use the same RNG, so it's all mixed up; +// but we still expect not so many different paths. 
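+// Convenience overload: build the distance and neighbour lookups from an
+// ArchitectureMapping and run the generic test above, with the single RNG
+// shared between the path finder and the test-data shuffling.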
+static void test( + TestResult& result, const ArchitectureMapping& arch_mapping, RNG& rng) { + DistancesFromArchitecture distances(arch_mapping); + NeighboursFromArchitecture neighbours(arch_mapping); + RiverFlowPathFinder path_finder(distances, neighbours, rng); + + test( + result, path_finder, distances, neighbours, + arch_mapping.number_of_vertices(), rng); +} + +SCENARIO("Path generation for ring graph") { + RNG rng; + TestResult result; + const RingArch arch(7); + const ArchitectureMapping arch_mapping(arch); + test(result, arch_mapping, rng); + REQUIRE(result.str() == "[ Number of path calls: 490 Extra paths: 0 ]"); +} + +SCENARIO("Path generation for square grids") { + RNG rng; + TestResult result; + for (size_t ver = 2; ver <= 4; ver += 2) { + for (size_t hor = 1; hor <= 5; hor += 2) { + for (size_t layer = 1; layer <= 3; layer += 2) { + const auto edges = get_square_grid_edges(ver, hor, layer); + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + test(result, arch_mapping, rng); + } + } + } + REQUIRE(result.str() == "[ Number of path calls: 70000 Extra paths: 583 ]"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_SwapList.cpp b/tket/tests/TokenSwapping/test_SwapList.cpp new file mode 100644 index 0000000000..184f24ca32 --- /dev/null +++ b/tket/tests/TokenSwapping/test_SwapList.cpp @@ -0,0 +1,55 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
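+// Basic sanity checks of SwapList: clearing, pushing a swap to the front,
+// emplacing a new front element and popping it again, all checked against
+// a simple string representation of the list contents.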
+ +#include +#include +#include + +#include "TokenSwapping/SwapFunctions.hpp" + +namespace tket { +namespace tsa_internal { +namespace tests { + +std::string get_swaps_str(const SwapList& swap_list) { + std::stringstream ss; + const auto svect = swap_list.to_vector(); + ss << "[" << svect.size() << " swaps:"; + for (auto swap : svect) { + ss << " (" << swap.first << " " << swap.second << ") "; + } + ss << "]"; + return ss.str(); +} + +SCENARIO("simple swap list") { + SwapList swap_list; + CHECK(get_swaps_str(swap_list) == "[0 swaps:]"); + swap_list.clear(); + CHECK(get_swaps_str(swap_list) == "[0 swaps:]"); + swap_list.push_front(get_swap(0, 1)); + CHECK(get_swaps_str(swap_list) == "[1 swaps: (0 1) ]"); + const auto current_front = swap_list.front_id().value(); + const auto new_front = swap_list.emplace_front(); + CHECK(current_front != new_front); + CHECK(new_front == swap_list.front_id().value()); + swap_list.front() = get_swap(998, 999); + CHECK(get_swaps_str(swap_list) == "[2 swaps: (998 999) (0 1) ]"); + swap_list.pop_front(); + CHECK(get_swaps_str(swap_list) == "[1 swaps: (0 1) ]"); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp new file mode 100644 index 0000000000..dc3cef50fc --- /dev/null +++ b/tket/tests/TokenSwapping/test_SwapListOptimiser.cpp @@ -0,0 +1,495 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "TestUtils/DebugFunctions.hpp" +#include "TokenSwapping/SwapListOptimiser.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { + +// Only checks that swaps are correct, doesn't measure how good they are +class SwapCorrectnessTester { + public: + // Perform the raw swaps for comparison. + void reset(const vector& raw_swaps) { + m_final_tracker.reset(); + for (const auto& swap : raw_swaps) { + (void)m_final_tracker.do_vertex_swap(swap); + } + m_number_of_raw_swaps = raw_swaps.size(); + } + + void require_equal_permutations(const SwapList& swap_list) const { + m_tracker_to_change.reset(); + size_t num_swaps = 0; + for (auto id = swap_list.front_id(); id; id = swap_list.next(id.value())) { + m_tracker_to_change.do_vertex_swap(swap_list.at(id.value())); + ++num_swaps; + } + REQUIRE(num_swaps == swap_list.size()); + REQUIRE(m_tracker_to_change.equal_vertex_permutation_from_swaps( + m_final_tracker)); + REQUIRE(m_number_of_raw_swaps >= num_swaps); + } + + private: + size_t m_number_of_raw_swaps = 0; + DynamicTokenTracker m_final_tracker; + mutable DynamicTokenTracker m_tracker_to_change; +}; + +// As well as correctness, also checks that optimisation passes +// do actually perform quite well. 
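+// The tester runs each raw swap sequence through several reduction methods
+// (optimising push_back, the zero-travel pass, the frontward-travel pass,
+// the token-tracking pass, and full_optimise), checks that every result
+// enacts the same vertex permutation as the raw swaps, and accumulates the
+// swap counts so that the final summary shows how much each method reduced.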
+class SwapTester { + public: + SwapTester() { + m_optimisation_functions.reserve(5); + + m_optimisation_functions.emplace_back([](const vector& raw_swaps, + SwapList& list, + SwapListOptimiser& optimiser) { + for (const Swap& swap : raw_swaps) { + optimiser.push_back(list, swap); + } + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.optimise_pass_with_zero_travel(list); + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.optimise_pass_with_frontward_travel(list); + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.optimise_pass_with_token_tracking(list); + }); + m_optimisation_functions.emplace_back( + [](const vector&, SwapList& list, SwapListOptimiser& optimiser) { + optimiser.full_optimise(list); + }); + reset_counters(); + } + + void reset_counters() { + // Also includes the number of raw swaps, + // and the number of tests. + m_counts.resize(m_optimisation_functions.size() + 2); + std::fill(m_counts.begin(), m_counts.end(), 0); + } + + void test(const vector& raw_swaps) { + ++m_counts[0]; + m_counts[1] += raw_swaps.size(); + m_correctness_tester.reset(raw_swaps); + + for (size_t ii = 0; ii < m_optimisation_functions.size(); ++ii) { + m_swap_list.clear(); + if (ii != 0) { + for (const auto& swap : raw_swaps) { + m_swap_list.push_back(swap); + } + } + m_optimisation_functions[ii](raw_swaps, m_swap_list, m_optimiser); + m_correctness_tester.require_equal_permutations(m_swap_list); + m_counts[ii + 2] += m_swap_list.size(); + } + } + + std::string get_final_result() const { + std::stringstream ss; + ss << "[ " << m_counts[0] << " tests; swap counts:"; + for (size_t ii = 1; ii < m_counts.size(); ++ii) { + ss << " " << m_counts[ii] << " "; + } + ss << "]"; + return ss.str(); + } + + private: + vector< + std::function&, SwapList&, SwapListOptimiser&)>> + m_optimisation_functions; + + vector m_counts; + + SwapList m_swap_list; + SwapListOptimiser m_optimiser; + SwapCorrectnessTester m_correctness_tester; + size_t number_of_tests; +}; +} // namespace + +SCENARIO("Random swaps are optimised") { + RNG rng; + SwapTester tester; + vector raw_swaps; + const vector num_vertices{5, 10, 20}; + + // We will multiply the number of possible distinct swaps + // by these numbers, then divide by 100, to determine how many swaps + // to generate for the test. + const vector percentages{50, 100, 200, 500}; + + // Not necessarily contiguous. 
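+  // (The labels are just 0,...,N-1 here, but they are kept in a set and
+  // copied into a vector, so the test does not rely on contiguity. As a
+  // worked example of the percentages: with 5 vertices there are
+  // 5*4/2 = 10 possible distinct swaps, so a percentage of 200 means
+  // (10 * 200) / 100 = 20 random swaps are generated.)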
+ std::set vertices_set; + + for (size_t number_of_vertices : num_vertices) { + const size_t possible_swaps = + (number_of_vertices * (number_of_vertices - 1)) / 2; + for (auto percent : percentages) { + const size_t num_swaps = (possible_swaps * percent) / 100; + vertices_set.clear(); + + for (size_t ii = 0; ii < number_of_vertices; ++ii) { + vertices_set.insert(ii); + } + const vector vertices(vertices_set.cbegin(), vertices_set.cend()); + for (int test_counter = 0; test_counter < 1; ++test_counter) { + INFO( + "test_counter=" << test_counter << ", number_of_vertices=" + << number_of_vertices << ", percent=" << percent); + + for (size_t jj = 0; jj < num_swaps; ++jj) { + const auto v1 = rng.get_element(vertices); + auto v2 = v1; + while (v1 == v2) { + v2 = rng.get_element(vertices); + } + raw_swaps.emplace_back(get_swap(v1, v2)); + } + tester.test(raw_swaps); + } + } + } + CHECK( + tester.get_final_result() == + "[ 12 tests; swap counts: 5636 5256 4976 4976 264 268 ]"); +} + +namespace { +// The above test just generates completely random swap sequences +// on N vertices. For a more realistic sequence, we try choosing them +// from a smaller list of possible swaps +// (thus, representing swaps on an incomplete graph). +// This might be more realistic. +struct EdgesGenerator { + std::set swaps_set; + size_t approx_num_vertices = 5; + size_t approx_num_edges = 10; + size_t percentage_to_add_new_vertex = 50; + + vector get_swaps(RNG& rng, size_t& actual_num_vertices) { + actual_num_vertices = 2; + swaps_set.clear(); + swaps_set.insert(get_swap(0, 1)); + + for (size_t counter = 10 * approx_num_edges; counter > 0; --counter) { + if (actual_num_vertices >= approx_num_vertices || + swaps_set.size() >= approx_num_edges) { + break; + } + bool add_new_vertex = rng.check_percentage(percentage_to_add_new_vertex); + if (!add_new_vertex) { + const auto current_edges = swaps_set.size(); + for (int edge_attempt = 10; edge_attempt > 0; --edge_attempt) { + const auto v1 = rng.get_size_t(actual_num_vertices - 1); + const auto v2 = rng.get_size_t(actual_num_vertices - 1); + if (v1 != v2) { + swaps_set.insert(get_swap(v1, v2)); + if (current_edges != swaps_set.size()) { + break; + } + } + } + if (current_edges != swaps_set.size()) { + continue; + } + add_new_vertex = true; + } + if (add_new_vertex) { + swaps_set.insert(get_swap( + rng.get_size_t(actual_num_vertices - 1), actual_num_vertices)); + ++actual_num_vertices; + continue; + } + } + vector result{swaps_set.cbegin(), swaps_set.cend()}; + return result; + } +}; + +struct ManyTestsRunner { + SwapTester tester; + + EdgesGenerator swaps_generator; + vector possible_swaps; + size_t actual_num_vertices; + vector raw_swaps; + + void run( + RNG& rng, const vector& approx_num_vertices, + const vector& approx_num_edges_percentages, + const vector& swap_length_percentages, + size_t num_tests_per_parameter_list) { + for (auto approx_nv : approx_num_vertices) { + swaps_generator.approx_num_vertices = approx_nv; + for (auto approx_nep : approx_num_edges_percentages) { + swaps_generator.approx_num_edges = + approx_nv / 2 + (approx_nv * (approx_nv - 1) * approx_nep) / 200; + for (size_t num_graphs = 0; num_graphs < 1; ++num_graphs) { + possible_swaps = swaps_generator.get_swaps(rng, actual_num_vertices); + for (auto slp : swap_length_percentages) { + const size_t swap_list_length = + 1 + (possible_swaps.size() * slp) / 100; + for (size_t test_counter = 0; + test_counter < num_tests_per_parameter_list; ++test_counter) { + raw_swaps.clear(); + for (size_t nn = 0; nn < 
swap_list_length; ++nn) { + raw_swaps.push_back(rng.get_element(possible_swaps)); + } + tester.test(raw_swaps); + } + } + } + } + } + } +}; +} // namespace + +SCENARIO("More realistic swap sequences") { + RNG rng; + const size_t num_tests_per_parameter_list = 10; + + // How many edges should we aim for, as a rough percentage of + // the total number n(n-1)/2 of possibilities? + const vector approx_num_edges_percentages{5, 10, 20, 30, 40, 80}; + + // How long should the swap length be, as a percentage of the + // total possible number of swaps? + const vector swap_length_percentages{50, 100, 200}; + + { + const vector approx_num_vertices{5, 8}; + ManyTestsRunner runner; + runner.run( + rng, approx_num_vertices, approx_num_edges_percentages, + swap_length_percentages, num_tests_per_parameter_list); + CHECK( + runner.tester.get_final_result() == + "[ 360 tests; swap counts: 3160 2380 2104 2104 1396 1406 ]"); + } + { + const vector approx_num_vertices{10, 12, 14}; + ManyTestsRunner runner; + runner.run( + rng, approx_num_vertices, approx_num_edges_percentages, + swap_length_percentages, num_tests_per_parameter_list); + CHECK( + runner.tester.get_final_result() == + "[ 540 tests; swap counts: 10370 9048 7580 7580 5180 5216 ]"); + } + { + const vector approx_num_vertices{30, 35, 40}; + ManyTestsRunner runner; + runner.run( + rng, approx_num_vertices, approx_num_edges_percentages, + swap_length_percentages, num_tests_per_parameter_list); + CHECK( + runner.tester.get_final_result() == + "[ 540 tests; swap counts: 38900 37626 30944 30944 24714 " + "24720 ]"); + } +} + +// If we perform a sequence of swaps, then again in reverse order, +// (and thus, make a palindrome), it ALWAYS equals the identity permutation. +// (Of course, odd-length palindromes like "(0,1)" do NOT give the identity!) +// It seems "obvious" that zero-travel and frontwards-travel passes +// should optimise (even-length) palindromes to zero; but is it actually true?! +// Token-tracking passes definitely do NOT, but counterexamples are rare. +// (Even though token-tracking IRREDUCIBILITY can be shown to be +// STRICTLY STRONGER than zero-travel or frontwards-travel IRREDUCIBILITY!) 
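+// For example, with S = (0,1)(2,3)(0,2), the palindrome S + reverse(S) is
+//   (0,1)(2,3)(0,2)(0,2)(2,3)(0,1),
+// which cancels pairwise from the middle outwards (every swap is its own
+// inverse), so the overall permutation is always the identity.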
+SCENARIO("Trivial swap list reversed order optimisation; pass comparisons") { + vector possible_swaps; + const unsigned num_vertices = 4; + + for (unsigned ii = 0; ii < num_vertices; ++ii) { + for (unsigned jj = ii + 1; jj < num_vertices; ++jj) { + possible_swaps.push_back(get_swap(ii, jj)); + } + } + vector raw_swaps; + SwapList swaps; + SwapListOptimiser optimiser; + + const auto push_back_swaps = [&raw_swaps, &swaps]() { + swaps.fast_clear(); + for (auto swap : raw_swaps) { + swaps.push_back(swap); + } + }; + + const auto concatenate_reversed_swaps = [&raw_swaps, &swaps, + &push_back_swaps]() { + push_back_swaps(); + for (auto citer = raw_swaps.crbegin(); citer != raw_swaps.crend(); + ++citer) { + swaps.push_back(*citer); + } + }; + + size_t simple_travel_equals_token_tracking_count = 0; + size_t simple_travel_beats_token_tracking_count = 0; + size_t simple_travel_beaten_by_token_tracking_count = 0; + size_t full_optimise_fully_reduces_palindrome = 0; + size_t full_optimise_does_not_destroy_palindrome = 0; + size_t token_tracking_pass_fully_reduces_palindrome = 0; + size_t token_tracking_pass_does_not_destroy_palindrome = 0; + + RNG rng; + + for (int test_counter = 0; test_counter < 1000; ++test_counter) { + if (raw_swaps.size() > 20) { + raw_swaps.clear(); + } + raw_swaps.push_back(rng.get_element(possible_swaps)); + + concatenate_reversed_swaps(); + optimiser.optimise_pass_with_zero_travel(swaps); + CHECK(swaps.size() == 0); + + concatenate_reversed_swaps(); + optimiser.optimise_pass_with_frontward_travel(swaps); + CHECK(swaps.size() == 0); + + concatenate_reversed_swaps(); + optimiser.optimise_pass_with_token_tracking(swaps); + if (swaps.size() == 0) { + ++token_tracking_pass_fully_reduces_palindrome; + } else { + ++token_tracking_pass_does_not_destroy_palindrome; + } + + concatenate_reversed_swaps(); + optimiser.full_optimise(swaps); + if (swaps.size() == 0) { + ++full_optimise_fully_reduces_palindrome; + } else { + ++full_optimise_does_not_destroy_palindrome; + } + + push_back_swaps(); + optimiser.optimise_pass_with_zero_travel(swaps); + const auto zero_travel_reduced_size = swaps.size(); + + push_back_swaps(); + optimiser.optimise_pass_with_frontward_travel(swaps); + const auto frontward_travel_reduced_size = swaps.size(); + CHECK(zero_travel_reduced_size == frontward_travel_reduced_size); + + push_back_swaps(); + optimiser.optimise_pass_with_token_tracking(swaps); + + const auto token_tracking_reduced_size = swaps.size(); + if (token_tracking_reduced_size == zero_travel_reduced_size) { + ++simple_travel_equals_token_tracking_count; + } else { + if (token_tracking_reduced_size < zero_travel_reduced_size) { + ++simple_travel_beaten_by_token_tracking_count; + } else { + ++simple_travel_beats_token_tracking_count; + } + } + } + CHECK(simple_travel_equals_token_tracking_count == 299); + CHECK(simple_travel_beaten_by_token_tracking_count == 697); + CHECK(simple_travel_beats_token_tracking_count == 4); + CHECK(full_optimise_fully_reduces_palindrome == 1000); + CHECK(full_optimise_does_not_destroy_palindrome == 0); + CHECK(token_tracking_pass_fully_reduces_palindrome == 976); + CHECK(token_tracking_pass_does_not_destroy_palindrome == 24); +} + +SCENARIO("specific swap list optimisation counterexamples") { + SwapList swaps; + SwapListOptimiser optimiser; + // Illustrates that general-travel irreducible does NOT imply token-tracking + // irreducible. 
(Of course, we haven't IMPLEMENTED general-travel reduction, + // but we can PROVE that general-travel irreducibility is equivalent to + // zero-travel and frontwards-travel irreducibility). + swaps.push_back(get_swap(0, 1)); + swaps.push_back(get_swap(0, 2)); + swaps.push_back(get_swap(0, 1)); + swaps.push_back(get_swap(0, 2)); + optimiser.optimise_pass_with_zero_travel(swaps); + CHECK(swaps.size() == 4); + optimiser.optimise_pass_with_frontward_travel(swaps); + CHECK(swaps.size() == 4); + optimiser.optimise_pass_with_token_tracking(swaps); + CHECK(str(swaps) == " (0,2) (0,1) "); + + // Are palindromes S + Reverse(S) ALWAYS optimised to an empty list by zero + // travel or frontwards travel passes? Seems so, but how to prove it? (We know + // that for IRREDUCIBILITY, zero-travel, frontwards-travel, general-travel + // give equivalent concepts, and token-tracking gives a strictly stronger + // pass, i.e. token-tracking irreducible => zero-travel irreducible, etc. but + // NOT conversely. But we have no such results for sequence reduction, and + // this counterexample illustrates that). + const vector swap_sequence_palindrome{ + {1, 2}, {1, 3}, {0, 2}, {1, 3}, {1, 3}, {2, 3}, {0, 1}, {1, 2}, + {0, 1}, {0, 2}, {1, 2}, {0, 3}, {0, 3}, {1, 2}, {0, 2}, {0, 1}, + {1, 2}, {0, 1}, {2, 3}, {1, 3}, {1, 3}, {0, 2}, {1, 3}, {1, 2}}; + REQUIRE(swap_sequence_palindrome.size() % 2 == 0); + for (unsigned ii = 0; ii < swap_sequence_palindrome.size(); ++ii) { + REQUIRE( + swap_sequence_palindrome[ii] == + swap_sequence_palindrome[swap_sequence_palindrome.size() - 1 - ii]); + } + + const auto push_back_swaps = [&swaps, &swap_sequence_palindrome]() { + swaps.fast_clear(); + for (auto swap : swap_sequence_palindrome) { + swaps.push_back(swap); + } + }; + + push_back_swaps(); + optimiser.optimise_pass_with_frontward_travel(swaps); + CHECK(swaps.size() == 0); + + push_back_swaps(); + optimiser.optimise_pass_with_zero_travel(swaps); + CHECK(swaps.size() == 0); + + push_back_swaps(); + optimiser.optimise_pass_with_token_tracking(swaps); + CHECK(str(swaps) == " (0,3) (0,1) (2,3) (0,2) (1,3) (1,2) "); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp new file mode 100644 index 0000000000..80a7d09b19 --- /dev/null +++ b/tket/tests/TokenSwapping/test_SwapsFromQubitMapping.cpp @@ -0,0 +1,121 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "Architecture/BestTsaWithArch.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +// Detailed algorithmic checks with quantitative benchmarks +// are done elsewhere, so this is really just checking conversion. + +namespace tket { +namespace tests { + +SCENARIO("get_swaps : swaps returned directly from architecture") { + // Will summarise relevant data, so that we can see any changes. 
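+  // Outline: take a 3x4x2 square grid, shuffle its nodes to define a target
+  // placement, ask BestTsaWithArch::get_swaps for swaps enacting it, then
+  // check that every swap is along an architecture edge and that applying
+  // the swaps really reproduces the target placement.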
+ std::stringstream problem_ss; + + const SquareGrid arch(3, 4, 2); + const auto nodes = arch.get_all_nodes_vec(); + const auto edges = arch.get_all_edges_vec(); + problem_ss << nodes.size() << " nodes; " << edges.size() << " edges."; + + // The value is the set of all neighbouring nodes. + std::map> allowed_edges_map; + for (auto [n1, n2] : edges) { + REQUIRE(n1 != n2); + allowed_edges_map[n1].insert(n2); + allowed_edges_map[n2].insert(n1); + } + + // Key: a node Value: its original position in "nodes" + std::map original_vertex_indices; + for (size_t ii = 0; ii < nodes.size(); ++ii) { + original_vertex_indices[nodes[ii]] = ii; + } + RNG rng_to_generate_swaps; + auto nodes_copy = nodes; + rng_to_generate_swaps.do_shuffle(nodes_copy); + const auto node_final_positions = nodes_copy; + + problem_ss << " Node mapping:"; + BestTsaWithArch::NodeMapping node_mapping; + for (size_t ii = 0; ii < nodes.size(); ++ii) { + problem_ss << "\ni=" << ii << " : " << node_final_positions[ii].repr() + << " -> " << nodes[ii].repr(); + node_mapping[node_final_positions[ii]] = nodes[ii]; + } + CHECK( + problem_ss.str() == + "24 nodes; 46 edges. Node mapping:\n" + "i=0 : gridNode[0, 0, 0] -> gridNode[0, 0, 0]\n" + "i=1 : gridNode[0, 3, 0] -> gridNode[0, 0, 1]\n" + "i=2 : gridNode[2, 1, 0] -> gridNode[0, 1, 0]\n" + "i=3 : gridNode[0, 1, 1] -> gridNode[0, 1, 1]\n" + "i=4 : gridNode[2, 2, 0] -> gridNode[0, 2, 0]\n" + "i=5 : gridNode[1, 1, 1] -> gridNode[0, 2, 1]\n" + "i=6 : gridNode[0, 0, 1] -> gridNode[0, 3, 0]\n" + "i=7 : gridNode[0, 3, 1] -> gridNode[0, 3, 1]\n" + "i=8 : gridNode[1, 3, 0] -> gridNode[1, 0, 0]\n" + "i=9 : gridNode[1, 0, 0] -> gridNode[1, 0, 1]\n" + "i=10 : gridNode[2, 2, 1] -> gridNode[1, 1, 0]\n" + "i=11 : gridNode[0, 1, 0] -> gridNode[1, 1, 1]\n" + "i=12 : gridNode[2, 0, 1] -> gridNode[1, 2, 0]\n" + "i=13 : gridNode[1, 2, 1] -> gridNode[1, 2, 1]\n" + "i=14 : gridNode[1, 3, 1] -> gridNode[1, 3, 0]\n" + "i=15 : gridNode[1, 0, 1] -> gridNode[1, 3, 1]\n" + "i=16 : gridNode[2, 0, 0] -> gridNode[2, 0, 0]\n" + "i=17 : gridNode[2, 1, 1] -> gridNode[2, 0, 1]\n" + "i=18 : gridNode[0, 2, 1] -> gridNode[2, 1, 0]\n" + "i=19 : gridNode[1, 2, 0] -> gridNode[2, 1, 1]\n" + "i=20 : gridNode[0, 2, 0] -> gridNode[2, 2, 0]\n" + "i=21 : gridNode[1, 1, 0] -> gridNode[2, 2, 1]\n" + "i=22 : gridNode[2, 3, 0] -> gridNode[2, 3, 0]\n" + "i=23 : gridNode[2, 3, 1] -> gridNode[2, 3, 1]"); + + // Calculate swaps to enact the permutation. + const auto node_swaps = BestTsaWithArch::get_swaps(arch, node_mapping); + + // This will hopefully decrease over time + // as we improve the algorithm. + // HOWEVER, apart from the underlying token swapping algorithm, + // there is ANOTHER possible way for this to change: + // Architecture could change the order of nodes returned + // in nodes(), which would cause vertex relabelling and hence + // an isomorphic but different token swapping problem. + // This is UNAVOIDABLE, since get_swaps takes an Architecture + // object, NOT an ArchitectureMapping object. + // This is not really a problem (unless the number of swaps + // changes massively), since the solution is checked + // for correctness. + CHECK(node_swaps.size() == 27); + + // Go back to the original configuration, and perform the swaps. 
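+  // Each returned node swap is translated back into a pair of indices into
+  // "nodes" (via original_vertex_indices), required to be an allowed edge,
+  // and applied with std::swap; the result must equal the shuffled target
+  // positions exactly.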
+ nodes_copy = nodes; + for (const auto& node_swap : node_swaps) { + REQUIRE(allowed_edges_map.at(node_swap.first).count(node_swap.second) != 0); + const auto index1 = original_vertex_indices.at(node_swap.first); + const auto index2 = original_vertex_indices.at(node_swap.second); + REQUIRE(index1 != index2); + std::swap(nodes_copy[index1], nodes_copy[index2]); + } + REQUIRE(nodes_copy == node_final_positions); +} + +} // namespace tests +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp new file mode 100644 index 0000000000..25717d8240 --- /dev/null +++ b/tket/tests/TokenSwapping/test_VariousPartialTsa.cpp @@ -0,0 +1,254 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "TestUtils/ArchitectureEdgesReimplementation.hpp" +#include "TestUtils/DebugFunctions.hpp" +#include "TestUtils/PartialTsaTesting.hpp" +#include "TestUtils/ProblemGeneration.hpp" +#include "TokenSwapping/CyclesPartialTsa.hpp" +#include "TokenSwapping/RiverFlowPathFinder.hpp" +#include "TokenSwapping/TrivialTSA.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +namespace { +struct Tester { + vector messages_full_trivial_tsa; + vector messages_partial_trivial_tsa; + vector messages_cycles_tsa_0; + mutable RNG rng; + mutable TrivialTSA trivial_tsa; + mutable CyclesPartialTsa cycles_tsa; + + void run_test( + const ArchitectureMapping& arch_mapping, + const vector& problems, size_t index) const { + trivial_tsa.set(TrivialTSA::Options::FULL_TSA); + CHECK( + run_tests( + arch_mapping, problems, rng, trivial_tsa, + RequiredTsaProgress::FULL) == messages_full_trivial_tsa[index]); + + trivial_tsa.set(TrivialTSA::Options::BREAK_AFTER_PROGRESS); + CHECK( + run_tests( + arch_mapping, problems, rng, trivial_tsa, + RequiredTsaProgress::NONZERO) == + messages_partial_trivial_tsa[index]); + + CHECK( + run_tests( + arch_mapping, problems, rng, cycles_tsa, + RequiredTsaProgress::NONE) == messages_cycles_tsa_0[index]); + } +}; + +} // namespace +SCENARIO("Partial TSA: Rings") { + const vector problem_messages{ + "[Ring3: 51582: v3 i1 f100 s1: 100 problems; 135 tokens]", + "[Ring4: 51481: v4 i1 f100 s1: 100 problems; 178 tokens]", + "[Ring5: 51644: v5 i1 f100 s1: 100 problems; 224 tokens]", + "[Ring6: 51528: v6 i1 f100 s1: 100 problems; 270 tokens]", + "[Ring7: 51496: v7 i1 f100 s1: 100 problems; 318 tokens]", + "[Ring30: 51633: v30 i1 f100 s1: 100 problems; 1473 tokens]"}; + + Tester tester; + tester.messages_full_trivial_tsa = { + "[TSA=Trivial FULL PF=RiverFlow\n" + "135 tokens; 69 total L; 55 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 82]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "178 tokens; 156 total L; 144 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 33, max 100, av 69]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "224 tokens; 260 total 
L; 273 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 33, max 100, av 59]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "270 tokens; 405 total L; 464 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 30, max 100, av 52]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "318 tokens; 511 total L; 596 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 30, max 100, av 49]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "1473 tokens; 10908 total L; 16873 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 26, max 50, av 36]"}; + + tester.messages_partial_trivial_tsa = { + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "135 tokens; 69 total L; 49 swaps.\n" + "L-decr %: min 50, max 100, av 97.\n" + "Power %: min 50, max 100, av 82]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "178 tokens; 156 total L; 101 swaps.\n" + "L-decr %: min 20, max 100, av 80.\n" + "Power %: min 16, max 100, av 67]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "224 tokens; 260 total L; 129 swaps.\n" + "L-decr %: min 12, max 100, av 61.\n" + "Power %: min 16, max 100, av 58]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "270 tokens; 405 total L; 186 swaps.\n" + "L-decr %: min 7, max 100, av 49.\n" + "Power %: min 8, max 100, av 52]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "318 tokens; 511 total L; 196 swaps.\n" + "L-decr %: min 7, max 100, av 39.\n" + "Power %: min 5, max 100, av 50]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "1473 tokens; 10908 total L; 273 swaps.\n" + "L-decr %: min 0, max 50, av 2.\n" + "Power %: min 1, max 100, av 46]"}; + + tester.messages_cycles_tsa_0 = { + "[TSA=Cycles PF=RiverFlow\n" + "135 tokens; 69 total L; 55 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 50, max 100, av 82]", + + "[TSA=Cycles PF=RiverFlow\n" + "178 tokens; 156 total L; 119 swaps.\n" + "L-decr %: min 0, max 100, av 97.\n" + "Power %: min 0, max 100, av 72]", + + "[TSA=Cycles PF=RiverFlow\n" + "224 tokens; 260 total L; 194 swaps.\n" + "L-decr %: min 0, max 100, av 94.\n" + "Power %: min 0, max 100, av 65]", + + "[TSA=Cycles PF=RiverFlow\n" + "270 tokens; 405 total L; 294 swaps.\n" + "L-decr %: min 0, max 100, av 92.\n" + "Power %: min 0, max 100, av 63]", + + "[TSA=Cycles PF=RiverFlow\n" + "318 tokens; 511 total L; 357 swaps.\n" + "L-decr %: min 0, max 100, av 89.\n" + "Power %: min 0, max 100, av 62]", + + "[TSA=Cycles PF=RiverFlow\n" + "1473 tokens; 10908 total L; 6344 swaps.\n" + "L-decr %: min 42, max 100, av 79.\n" + "Power %: min 50, max 86, av 61]"}; + + std::string arch_name; + const ProblemGenerator00 generator; + + for (size_t index = 0; index < problem_messages.size(); ++index) { + auto num_vertices = index + 3; + if (num_vertices == 8) { + num_vertices = 30; + } + const RingArch arch(num_vertices); + arch_name = "Ring" + std::to_string(num_vertices); + + // OK to reuse RNG, as it's reset before each problem. 
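+    // set_seed() is called with no argument before generating each problem
+    // set, so the problems for each ring size do not depend on what the
+    // shared RNG was previously used for.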
+ tester.rng.set_seed(); + const auto problems = generator.get_problems( + arch_name, num_vertices, tester.rng, problem_messages[index]); + + const ArchitectureMapping arch_mapping(arch); + tester.run_test(arch_mapping, problems, index); + } +} + +SCENARIO("Partial TSA: Square grid") { + const vector> grid_parameters = { + {2, 3, 3}, {5, 5, 3}}; + const vector problem_messages{ + "[Grid(2,3,3): 51683: v18 i1 f100 s1: 100 problems; 865 tokens]", + "[Grid(5,5,3): 51573: v75 i1 f100 s1: 100 problems; 3751 tokens]"}; + + Tester tester; + tester.messages_full_trivial_tsa = { + "[TSA=Trivial FULL PF=RiverFlow\n" + "865 tokens; 1921 total L; 2592 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 31, max 100, av 41]", + + "[TSA=Trivial FULL PF=RiverFlow\n" + "3751 tokens; 15297 total L; 23212 swaps.\n" + "L-decr %: min 100, max 100, av 100.\n" + "Power %: min 28, max 50, av 36]"}; + + tester.messages_partial_trivial_tsa = { + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "865 tokens; 1921 total L; 153 swaps.\n" + "L-decr %: min 2, max 100, av 12.\n" + "Power %: min 8, max 100, av 48]", + + "[TSA=Trivial NONZERO PF=RiverFlow\n" + "3751 tokens; 15297 total L; 193 swaps.\n" + "L-decr %: min 0, max 25, av 1.\n" + "Power %: min 5, max 100, av 44]"}; + + tester.messages_cycles_tsa_0 = { + "[TSA=Cycles PF=RiverFlow\n" + "865 tokens; 1921 total L; 1425 swaps.\n" + "L-decr %: min 60, max 100, av 95.\n" + "Power %: min 46, max 100, av 61]", + + "[TSA=Cycles PF=RiverFlow\n" + "3751 tokens; 15297 total L; 11464 swaps.\n" + "L-decr %: min 83, max 100, av 95.\n" + "Power %: min 50, max 79, av 59]"}; + + const ProblemGenerator00 generator; + + for (size_t index = 0; index < grid_parameters.size(); ++index) { + const auto& parameters = grid_parameters[index]; + + const auto edges = + get_square_grid_edges(parameters[0], parameters[1], parameters[2]); + const Architecture arch(edges); + const ArchitectureMapping arch_mapping(arch, edges); + + std::stringstream ss; + ss << "Grid(" << parameters[0] << "," << parameters[1] << "," + << parameters[2] << ")"; + + tester.rng.set_seed(); + const auto problems = generator.get_problems( + ss.str(), arch.n_nodes(), tester.rng, problem_messages[index]); + + tester.run_test(arch_mapping, problems, index); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_VectorListHybrid.cpp b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp new file mode 100644 index 0000000000..ea179510df --- /dev/null +++ b/tket/tests/TokenSwapping/test_VectorListHybrid.cpp @@ -0,0 +1,256 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
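+// Tests of VectorListHybrid (instantiated for unsigned elements here):
+// reversing the list must agree with std::reverse applied to a copy of its
+// contents, and a fixed sequence of inserts/erases must give the same
+// contents on a fresh list, a clear()ed list and a fast_clear()ed list,
+// with clear() (but not fast_clear()) also reproducing the same IDs.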
+ +#include +#include +#include + +#include "TokenSwapping/VectorListHybrid.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +typedef VectorListHybrid List; +typedef List::ID ID; + +SCENARIO("Reversing a list") { + RNG rng; + List list; + auto copied_elements = list.to_vector(); + vector copied_elements_again; + REQUIRE(copied_elements.empty()); + for (int count = 0; count < 1000; ++count) { + const unsigned x = rng.get_size_t(1000); + switch (x % 7) { + // Should we delete? + case 0: + if (list.size() != 0) { + const auto id = list.front_id().value(); + list.erase(id); + } + break; + case 1: + if (list.size() != 0) { + const auto id = list.back_id().value(); + list.erase(id); + } + break; + case 2: + list.clear(); + break; + default: + break; + } + if (x % 2 == 0) { + list.push_front(x); + } else { + list.push_back(x); + } + copied_elements = list.to_vector(); + list.reverse(); + copied_elements_again = list.to_vector(); + std::reverse(copied_elements.begin(), copied_elements.end()); + REQUIRE(copied_elements == copied_elements_again); + } +} + +// Write the contents to a string for testing, possibly including IDs. +static std::string repr(const List& list, bool include_ids) { + std::stringstream ss; + ss << "[size " << list.size() << ": "; + for (auto id_opt = list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = list.next(id); + ss << list.at(id) << " "; + } + if (include_ids) { + ss << "; ids: "; + for (auto id_opt = list.front_id(); id_opt;) { + const auto id = id_opt.value(); + id_opt = list.next(id); + ss << id << " "; + } + } + ss << "]"; + return ss.str(); +} + +// In "operations", a positive number p means go to position p % size() in the +// list, and insert a number there. A negative number n means do the same thing +// with abs(n) % size(), but erase instead of insert. Returns a string +// representing the elements which were erased/inserted, again using negative +// numbers to denote erasure. Does NOT give the IDs. +static std::string perform_operation( + const vector& operations, List& list, unsigned& next_element) { + std::stringstream ss; + ss << "["; + for (int position_code : operations) { + REQUIRE(position_code != 0); + const auto size = list.size(); + if (size == 0) { + if (position_code > 0) { + list.push_back(next_element); + ss << "new: " << next_element << " "; + next_element += 100; + continue; + } + // Cannot erase from an empty list! + ss << "; "; + continue; + } + // It's nonempty. 
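+      // The target position is |position_code| reduced modulo the current
+      // size; we then walk that many steps from the front to find the ID
+      // to insert after or erase.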
+ unsigned position = std::abs(position_code); + position %= size; + ID id = list.front_id().value(); + for (unsigned nn = 0; nn < position; ++nn) { + const auto next_id = list.next(id); + REQUIRE(next_id); + id = next_id.value(); + } + ss << "at " << position << ": "; + if (position_code > 0) { + ss << next_element << " "; + const ID new_id = list.insert_after(id); + list.at(new_id) = next_element; + next_element += 100; + continue; + } + ss << "-" << list.at(id) << " "; + list.erase(id); + } + ss << "]"; + return ss.str(); +} + +namespace { +struct Result { + std::string initial_op_str; + std::string list_str_after_one_op; + std::string list_str_after_one_op_without_ids; + std::string op_str_after_two_ops; + std::string list_str_after_two_ops; + std::string list_str_after_two_ops_without_ids; + + Result(const vector& operations, List& list, unsigned& next_element) + : initial_op_str(perform_operation(operations, list, next_element)), + list_str_after_one_op(repr(list, true)), + list_str_after_one_op_without_ids(repr(list, false)), + op_str_after_two_ops(perform_operation(operations, list, next_element)), + list_str_after_two_ops(repr(list, true)), + list_str_after_two_ops_without_ids(repr(list, false)) {} + + void check_equal_contents_without_ids(const Result& other) const { + CHECK(initial_op_str == other.initial_op_str); + CHECK( + list_str_after_one_op_without_ids == + other.list_str_after_one_op_without_ids); + CHECK(op_str_after_two_ops == other.op_str_after_two_ops); + CHECK( + list_str_after_two_ops_without_ids == + other.list_str_after_two_ops_without_ids); + } + + void check_equal_id_data(const Result& other) const { + CHECK(list_str_after_one_op == other.list_str_after_one_op); + CHECK(list_str_after_two_ops == other.list_str_after_two_ops); + } + + void check_different_id_data(const Result& other) const { + CHECK(list_str_after_one_op != other.list_str_after_one_op); + CHECK(list_str_after_two_ops != other.list_str_after_two_ops); + } +}; +} // namespace + +// We want to test that lists have equal or different contents, +// with/without clear/fast_clear, etc. +// The same sequences of logical operations +// (erase, insert, etc.) applied to a new list or a fast_cleared list might NOT +// preserve IDs, but should preserve the contents. With clear(), it should ALSO +// preserve IDs. +SCENARIO("Inserting, erasing, clearing tests") { + // These are just some random numbers. + const vector operations{-10, -4, 1, 3, -8, 2, -2, -3, -5, -9, + -6, -2, -7, 2, 5, -8, 6, -4, 10, 7, + -10, -1, 5, 6, 9, 1, 4, -7, -1, 4, + 8, -9, 8, -3, -5, -6, 9, 3, 7, 10}; + + List list; + unsigned next_element = 999; + const Result result_with_new_object(operations, list, next_element); + + // Also test clearing empty objects. + { + // bits 00 mean do nothing, 01 means clear, 11 means fast clear. 
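+    // Each option is decoded two bits at a time, least significant pair
+    // first: 01 means clear(), 11 means fast_clear(). For example
+    // 0x7 = 0b0111 decodes as 11 then 01, i.e. fast_clear() then clear().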
+ const vector clear_options{ + 0, // nothing, + 0x5, // clear, clear, + 0x7, // fast clear, clear, + 0xD, // clear, fast clear, + 0xF, // fast clear, fast clear + 0x15 // clear, clear, clear + }; + for (unsigned option : clear_options) { + List empty_list; + unsigned copy = option; + while (copy != 0) { + const unsigned code = copy & 0x3; + copy >>= 2; + switch (code) { + case 1: + empty_list.clear(); + break; + case 3: + empty_list.fast_clear(); + break; + default: { + REQUIRE(false); + } + } + } + next_element = 999; + const Result result_with_empty_list(operations, empty_list, next_element); + result_with_empty_list.check_equal_contents_without_ids( + result_with_new_object); + result_with_empty_list.check_equal_id_data(result_with_new_object); + } + } + // Now repeat the operations. + list.clear(); + { + INFO("second time, cleared list"); + next_element = 999; + const Result result_with_cleared_object(operations, list, next_element); + result_with_cleared_object.check_equal_contents_without_ids( + result_with_new_object); + result_with_cleared_object.check_equal_id_data(result_with_new_object); + } + list.fast_clear(); + { + INFO("third time, fast cleared list"); + next_element = 999; + const Result result_with_cleared_object(operations, list, next_element); + result_with_cleared_object.check_equal_contents_without_ids( + result_with_new_object); + result_with_cleared_object.check_different_id_data(result_with_new_object); + } +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp new file mode 100644 index 0000000000..65baab5eb8 --- /dev/null +++ b/tket/tests/TokenSwapping/test_VectorListHybridSkeleton.cpp @@ -0,0 +1,308 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include +#include + +#include "TokenSwapping/VectorListHybridSkeleton.hpp" +#include "Utils/RNG.hpp" + +using std::vector; + +namespace tket { +namespace tsa_internal { +namespace tests { + +// A slower implementation of VectorListHybridSkeleton +// using linked lists +struct VLHS_tester_reimplementation { + // Each node will contain the index it was given. 
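+  // The list stores, in logical order, the indices handed out by the real
+  // VectorListHybridSkeleton, so that the two structures can be compared
+  // element by element after every operation.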
+ mutable std::list data; + + void clear() { data.clear(); } + size_t size() const { return data.size(); } + size_t front_index() const { return data.front(); } + size_t back_index() const { return data.back(); } + + std::list::iterator find(size_t index) { + for (auto iter = data.begin(); iter != data.end(); ++iter) { + if (*iter == index) { + return iter; + } + } + throw std::runtime_error( + std::string("index ") + std::to_string(index) + " not found"); + } + + std::list::const_iterator find(size_t index) const { + for (auto citer = data.cbegin(); citer != data.cend(); ++citer) { + if (*citer == index) { + return citer; + } + } + throw std::runtime_error( + std::string("index ") + std::to_string(index) + " not found"); + } + + std::optional next(size_t index) const { + auto citer = find(index); + ++citer; + if (citer == data.cend()) { + return {}; + } + return *citer; + } + + std::optional previous(size_t index) const { + auto citer = find(index); + --citer; + if (citer != data.cend()) { + return {}; + } + return *citer; + } + + void erase(size_t index) { + auto iter = find(index); + data.erase(iter); + } + + void insert_for_empty_list(size_t new_index) { + REQUIRE(data.empty()); + data.push_front(new_index); + } + + void insert_after(size_t index, size_t new_index) { + auto iter = find(index); + // We can only insert BEFORE an iter with STL + ++iter; + if (iter == data.end()) { + // We were at the back. + data.push_back(new_index); + return; + } + // We're now after the node, we insert before + data.insert(iter, new_index); + } + + void insert_before(size_t index, size_t new_index) { + auto iter = find(index); + data.insert(iter, new_index); + } +}; + +// Keep track of which indices have currently not yet been erased +struct ValidIndices { + std::set indices; + + bool contains(size_t index) const { return indices.count(index) != 0; } + void check_and_insert_new_index(size_t index) { + REQUIRE(index != VectorListHybridSkeleton::get_invalid_index()); + REQUIRE(indices.count(index) == 0); + indices.insert(index); + } + + void check_and_erase_index(size_t index) { + REQUIRE(indices.count(index) != 0); + indices.erase(index); + } + + size_t get_index(RNG& rng) const { + REQUIRE(!indices.empty()); + auto citer = indices.cbegin(); + for (size_t ii = rng.get_size_t(indices.size() - 1); ii != 0; --ii) { + ++citer; + } + return *citer; + } +}; + +void require_equal_indices( + size_t index, const std::optional& index_opt) { + if (index == VectorListHybridSkeleton::get_invalid_index()) { + REQUIRE(!index_opt); + return; + } + REQUIRE(index_opt); + REQUIRE(index_opt.value() == index); +} + +bool are_equal( + const VectorListHybridSkeleton& vlhs, + const VLHS_tester_reimplementation& tester, + const ValidIndices& valid_indices) { + if (vlhs.size() != tester.size()) { + return false; + } + if (vlhs.size() == 0) { + return true; + } + auto citer = tester.data.cbegin(); + for (auto index = vlhs.front_index(); + index != VectorListHybridSkeleton::get_invalid_index(); + index = vlhs.next(index)) { + if (*citer != index) { + return false; + } + REQUIRE(valid_indices.contains(index)); + ++citer; + } + REQUIRE(citer == tester.data.cend()); + REQUIRE(*tester.data.cbegin() == vlhs.front_index()); + REQUIRE(*tester.data.crbegin() == vlhs.back_index()); + return true; +} + +SCENARIO("Random operations preserve VLHS") { + RNG rng; + VLHS_tester_reimplementation tester; + VectorListHybridSkeleton vlhs; + ValidIndices valid_indices; + REQUIRE(are_equal(vlhs, tester, valid_indices)); + + for (int op_counter = 0; 
op_counter < 10000; ++op_counter) { + INFO("counter=" << op_counter); + if (op_counter + 1 % 100 == 0) { + vlhs.clear(); + tester.clear(); + valid_indices.indices.clear(); + } + bool should_insert = rng.check_percentage(50); + if (valid_indices.indices.empty()) { + should_insert = true; + } + if (valid_indices.indices.size() > 10) { + should_insert = false; + } + if (should_insert) { + if (valid_indices.indices.empty()) { + vlhs.insert_for_empty_list(); + const auto new_index = vlhs.front_index(); + REQUIRE(new_index == vlhs.back_index()); + tester.insert_for_empty_list(new_index); + valid_indices.check_and_insert_new_index(new_index); + } else { + const auto index = valid_indices.get_index(rng); + const bool insert_after = rng.check_percentage(50); + + if (insert_after) { + vlhs.insert_after(index); + const auto new_index = vlhs.next(index); + tester.insert_after(index, new_index); + valid_indices.check_and_insert_new_index(new_index); + } else { + vlhs.insert_before(index); + const auto new_index = vlhs.previous(index); + tester.insert_before(index, new_index); + valid_indices.check_and_insert_new_index(new_index); + } + } + } else { + // We erase instead. + const auto index = valid_indices.get_index(rng); + vlhs.erase(index); + tester.erase(index); + valid_indices.check_and_erase_index(index); + } + REQUIRE(are_equal(vlhs, tester, valid_indices)); + } +} + +static std::string get_fixed_ops_str(bool do_fast_clear) { + std::stringstream ss; + VectorListHybridSkeleton vlhs; + ss << vlhs.debug_str(); + vlhs.insert_for_empty_list(); + ss << "\nInsert: " << vlhs.debug_str(); + vlhs.insert_after(vlhs.front_index()); + ss << "\nInsert after front: " << vlhs.debug_str(); + const auto id = vlhs.front_index(); + vlhs.insert_before(id); + ss << "\nInsert before front: " << vlhs.debug_str(); + vlhs.insert_after(id); + ss << "\nInsert after " << id << ": " << vlhs.debug_str(); + vlhs.erase(3); + ss << "\nErase 3: " << vlhs.debug_str(); + if (do_fast_clear) { + vlhs.fast_clear(); + ss << "\nFast clear: " << vlhs.debug_str(); + } else { + vlhs.clear(); + ss << "\nClear: " << vlhs.debug_str(); + } + vlhs.insert_for_empty_list(); + ss << "\nInsert: " << vlhs.debug_str(); + return ss.str(); +} + +SCENARIO("Some fixed ops") { + // The only difference should be in the internal link values. 
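+  // Both runs of get_fixed_ops_str() perform the same insert/erase sequence
+  // and compare the exact debug_str() transcript. The prefix is shared; the
+  // suffixes differ only in the deleted-slot links: after clear() they appear
+  // in index order {0->1->2->3->}, while fast_clear() keeps the previous
+  // active-chain order {2->0->1->3->}.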
+ const std::string common_prefix{ + "VLHS: size 0, front NULL back NULL, del.front NULL\n" + "Active links: forward []\n" + "Backward ()\n" + "Del.links: {}\n" + "Insert: VLHS: size 1, front 0 back 0, del.front NULL\n" + "Active links: forward [0->]\n" + "Backward (0->)\n" + "Del.links: {}\n" + "Insert after front: VLHS: size 2, front 0 back 1, del.front NULL\n" + "Active links: forward [0->1->]\n" + "Backward (1->0->)\n" + "Del.links: {}\n" + "Insert before front: VLHS: size 3, front 2 back 1, del.front NULL\n" + "Active links: forward [2->0->1->]\n" + "Backward (1->0->2->)\n" + "Del.links: {}\n" + "Insert after 0: VLHS: size 4, front 2 back 1, del.front NULL\n" + "Active links: forward [2->0->3->1->]\n" + "Backward (1->3->0->2->)\n" + "Del.links: {}\n" + "Erase 3: VLHS: size 3, front 2 back 1, del.front 3\n" + "Active links: forward [2->0->1->]\n" + "Backward (1->0->2->)\n" + "Del.links: {3->}\n"}; + const std::string fast_clear_suffix{ + "Fast clear: VLHS: size 0, front NULL back NULL, del.front 2\n" + "Active links: forward []\n" + "Backward ()\n" + "Del.links: {2->0->1->3->}\n" + "Insert: VLHS: size 1, front 2 back 2, del.front 0\n" + "Active links: forward [2->]\n" + "Backward (2->)\n" + "Del.links: {0->1->3->}"}; + const std::string clear_suffix{ + "Clear: VLHS: size 0, front NULL back NULL, del.front 0\n" + "Active links: forward []\n" + "Backward ()\n" + "Del.links: {0->1->2->3->}\n" + "Insert: VLHS: size 1, front 0 back 0, del.front 1\n" + "Active links: forward [0->]\n" + "Backward (0->)\n" + "Del.links: {1->2->3->}"}; + const auto fast_clear_str = get_fixed_ops_str(true); + CHECK(fast_clear_str == common_prefix + fast_clear_suffix); + + const auto clear_str = get_fixed_ops_str(false); + CHECK(clear_str == common_prefix + clear_suffix); +} + +} // namespace tests +} // namespace tsa_internal +} // namespace tket diff --git a/tket/tests/test_Utils.cpp b/tket/tests/Utils/test_HelperFunctions.cpp similarity index 100% rename from tket/tests/test_Utils.cpp rename to tket/tests/Utils/test_HelperFunctions.cpp diff --git a/tket/tests/Graphs/test_RNG.cpp b/tket/tests/Utils/test_RNG.cpp similarity index 95% rename from tket/tests/Graphs/test_RNG.cpp rename to tket/tests/Utils/test_RNG.cpp index 13a8037446..1b75d568af 100644 --- a/tket/tests/Graphs/test_RNG.cpp +++ b/tket/tests/Utils/test_RNG.cpp @@ -18,16 +18,12 @@ #include #include -#include "RNG.hpp" +#include "Utils/RNG.hpp" -using std::size_t; using std::stringstream; using std::vector; namespace tket { -namespace graphs { -namespace tests { -namespace test_RNG { // Check that the RNG really is identical across all platforms. @@ -146,7 +142,4 @@ SCENARIO("RNG: permutations") { " 69 24 68 71 64 84 36 65 97 98 52 45 ]"); } -} // namespace test_RNG -} // namespace tests -} // namespace graphs } // namespace tket diff --git a/tket/tests/ZX/test_Flow.cpp b/tket/tests/ZX/test_Flow.cpp new file mode 100644 index 0000000000..18ae19bb87 --- /dev/null +++ b/tket/tests/ZX/test_Flow.cpp @@ -0,0 +1,337 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "ZX/Flow.hpp" + +namespace tket { + +namespace zx { + +namespace test_flow { + +SCENARIO("Testing flow verification") { + // Diagram combines Ex. 2.43, "There and back again: a circuit extraction + // tale", Backens et al. 2021 and Ex. C.13, "Relating measurement patterns to + // circuits via Pauli flow", Simmons 2021 + ZXDiagram diag(1, 3, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Gflow example from Backens et al. + ZXVert ga = diag.add_vertex(ZXType::XY, 0.3); + ZXVert gb = diag.add_vertex(ZXType::XY, 0.7); + ZXVert gc = diag.add_vertex(ZXType::XZ, 1.4); + ZXVert gd = diag.add_vertex(ZXType::YZ, 0.9); + diag.add_wire(ins.at(0), ga); + diag.add_wire(ga, gb, ZXWireType::H); + diag.add_wire(gb, gc, ZXWireType::H); + diag.add_wire(gb, gd, ZXWireType::H); + diag.add_wire(gc, gd, ZXWireType::H); + diag.add_wire(gb, outs.at(0), ZXWireType::H); + // Pauli flow example from Simmons (angles cut to Paulis) + ZXVert pi = diag.add_vertex(ZXType::XY, 0.9); + ZXVert pa = diag.add_vertex(ZXType::PZ); + ZXVert pb = diag.add_vertex(ZXType::PX); + ZXVert pc = diag.add_vertex(ZXType::XY, 0.2); + ZXVert pd = diag.add_vertex(ZXGen::create_gen(ZXType::PY, true)); + diag.add_wire(gc, pi, ZXWireType::H); + diag.add_wire(pi, pb, ZXWireType::H); + diag.add_wire(pa, pb, ZXWireType::H); + diag.add_wire(pa, pc, ZXWireType::H); + diag.add_wire(pa, pd, ZXWireType::H); + diag.add_wire(pb, pd, ZXWireType::H); + diag.add_wire(pc, pd, ZXWireType::H); + diag.add_wire(pc, outs.at(1), ZXWireType::H); + diag.add_wire(pd, outs.at(2), ZXWireType::H); + + // Give a valid Pauli flow + std::map c{ + {ga, {gb}}, // Odd = {ga, gc, gd, outs[0]} + {gb, {gc}}, // Odd = {gb, gc, pi} + {gc, {gc, gd}}, // Odd = {gc, gd, pi} + {gd, {gd, outs.at(0), pi}}, // Odd = {pb} + {pi, {pb, outs.at(2)}}, // Odd = {pi, pa} + {pa, {pa, pc, pd, outs.at(2)}}, // Odd = {pd, outs[1], outs[2]} + {pb, {pc, pd, outs.at(1)}}, // Odd = {pb, pd, outs[1], outs[2]} + {pc, {outs.at(1)}}, // Odd = {pc} + {pd, {outs.at(2)}}, // Odd = {pd} + }; + std::map d{ + {ga, 7}, {gb, 6}, {gc, 5}, {gd, 4}, + {pi, 3}, {pa, 2}, {pb, 2}, {pc, 1}, + {pd, 1}, {outs.at(0), 0}, {outs.at(1), 0}, {outs.at(2), 0}, + }; + + Flow fl{c, d}; + REQUIRE_NOTHROW(fl.verify(diag)); + + // Check for ordering of corrections + d.at(ga) = 4; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A qubit has an X correction in its past"); + d.at(gb) = 3; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A qubit has a Z correction in its past"); + // Revert to valid flow + d.at(ga) = 7; + d.at(gb) = 6; + + // Check history Y measurements have Y corrections + diag.set_vertex_ZXGen_ptr(pb, ZXGen::create_gen(ZXType::PY)); + c.at(pa) = {pa}; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A past Y vertex receives a Z correction"); + c.at(pa) = {pa, pc, pd}; + d.at(pd) = 2; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "A past Y vertex receives an X correction"); + // Revert to valid flow + diag.set_vertex_ZXGen_ptr(pb, ZXGen::create_gen(ZXType::PX)); + c.at(pa) = {pa, pc, pd, outs.at(2)}; + d.at(pd) = 1; + + // Check all basis corrections are ok + // Correct XY with I, X, Y + std::vector cs{ + {}, {pc, outs.at(2)}, {pc, outs.at(1), outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "XY vertex must be 
corrected with a Z"); + } + c.at(pc) = {outs.at(1)}; + // Correct XZ with I, X, Z + cs = {{}, {gc, outs.at(0)}, {pi}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(gc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "XZ vertex must be corrected with a Y"); + } + c.at(gc) = {gc, gd}; + // Correct YZ with I, Y, Z + diag.set_vertex_ZXGen_ptr(pa, ZXGen::create_gen(ZXType::YZ, Expr(1.2))); + cs = {{}, {pa, pd}, {pc}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pa) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "YZ vertex must be corrected with an X"); + } + diag.set_vertex_ZXGen_ptr(pa, ZXGen::create_gen(ZXType::PZ)); + c.at(pa) = {pa, pc, pd, outs.at(2)}; + // Correct PX with I, X + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::PX)); + cs = {{}, {pc, outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "PX vertex must be corrected with a Y or Z"); + } + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::XY, Expr(0.2))); + c.at(pc) = {outs.at(1)}; + // Correct PY with I, Y + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::PY)); + cs = {{}, {pc, outs.at(1), outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pc) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "PY vertex must be corrected with an X or Z"); + } + diag.set_vertex_ZXGen_ptr(pc, ZXGen::create_gen(ZXType::XY, Expr(0.2))); + c.at(pc) = {outs.at(1)}; + // Correct PZ with I, Z + cs = {{}, {pc, outs.at(2)}}; + for (const ZXVertSeqSet& cc : cs) { + c.at(pa) = cc; + fl = {c, d}; + REQUIRE_THROWS_WITH( + fl.verify(diag), "PZ vertex must be corrected with an X or Y"); + } +} + +SCENARIO("Testing causal flow identification and focussing") { + // Diagram based on Fig. 
8, "Determinism in the one-way model", + // Danos & Kashefi 2006 + ZXDiagram diag(2, 2, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Input measurements + ZXVert i0 = diag.add_vertex(ZXType::XY, 0.3); + ZXVert i1 = diag.add_vertex(ZXType::XY, 0.7); + diag.add_wire(ins.at(0), i0); + diag.add_wire(ins.at(1), i1); + // Chain on qubit 0 + ZXVert v0 = diag.add_vertex(ZXType::XY, 1.4); + diag.add_wire(i0, v0, ZXWireType::H); + diag.add_wire(v0, outs.at(0), ZXWireType::H); + // Chain on qubit 1 + ZXVert v1a = diag.add_vertex(ZXType::XY, 0.9); + ZXVert v1b = diag.add_vertex(ZXType::XY, 0.2); + ZXVert v1c = diag.add_vertex(ZXType::XY, 1.2); + ZXVert v1d = diag.add_vertex(ZXType::XY, 1.6); + ZXVert v1e = diag.add_vertex(ZXType::XY, 0.4); + diag.add_wire(i1, v1a, ZXWireType::H); + diag.add_wire(v1a, v1b, ZXWireType::H); + diag.add_wire(v1b, v1c, ZXWireType::H); + diag.add_wire(v1c, v1d, ZXWireType::H); + diag.add_wire(v1d, v1e, ZXWireType::H); + diag.add_wire(v1e, outs.at(1), ZXWireType::H); + // Cross-chain links + diag.add_wire(i0, v1a, ZXWireType::H); + diag.add_wire(i0, v1d, ZXWireType::H); + + Flow f = Flow::identify_causal_flow(diag); + + CHECK(f.c(i0) == ZXVertSeqSet{v0}); + CHECK(f.c(v0) == ZXVertSeqSet{outs.at(0)}); + CHECK(f.c(i1) == ZXVertSeqSet{v1a}); + CHECK(f.c(v1a) == ZXVertSeqSet{v1b}); + CHECK(f.c(v1b) == ZXVertSeqSet{v1c}); + CHECK(f.c(v1c) == ZXVertSeqSet{v1d}); + CHECK(f.c(v1d) == ZXVertSeqSet{v1e}); + CHECK(f.c(v1e) == ZXVertSeqSet{outs.at(1)}); + REQUIRE_NOTHROW(f.verify(diag)); + + REQUIRE_NOTHROW(f.focus(diag)); + CHECK(f.c(i0) == ZXVertSeqSet{v0}); + CHECK(f.c(v0) == ZXVertSeqSet{outs.at(0)}); + CHECK(f.c(i1) == ZXVertSeqSet{v1a, v0, v1c, v1e}); + CHECK(f.c(v1a) == ZXVertSeqSet{v1b, v1d, v0, outs.at(1)}); + CHECK(f.c(v1b) == ZXVertSeqSet{v1c, v1e}); + CHECK(f.c(v1c) == ZXVertSeqSet{v1d, v0, outs.at(1)}); + CHECK(f.c(v1d) == ZXVertSeqSet{v1e}); + CHECK(f.c(v1e) == ZXVertSeqSet{outs.at(1)}); + REQUIRE_NOTHROW(f.verify(diag)); +} + +SCENARIO("Testing Pauli flow identification and focussing") { + // Diagram combines Ex. 2.43, "There and back again: a circuit extraction + // tale", Backens et al. 2021 and Ex. C.13, "Relating measurement patterns to + // circuits via Pauli flow", Simmons 2021 + ZXDiagram diag(1, 3, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Gflow example from Backens et al. 
+ ZXVert ga = diag.add_vertex(ZXType::XY, 0.3); + ZXVert gb = diag.add_vertex(ZXType::XY, 0.7); + ZXVert gc = diag.add_vertex(ZXType::XZ, 1.4); + ZXVert gd = diag.add_vertex(ZXType::YZ, 0.9); + diag.add_wire(ins.at(0), ga); + diag.add_wire(ga, gb, ZXWireType::H); + diag.add_wire(gb, gc, ZXWireType::H); + diag.add_wire(gb, gd, ZXWireType::H); + diag.add_wire(gc, gd, ZXWireType::H); + diag.add_wire(gb, outs.at(0), ZXWireType::H); + // Pauli flow example from Simmons (angles cut to Paulis) + ZXVert pi = diag.add_vertex(ZXType::XY, 0.9); + ZXVert pa = diag.add_vertex(ZXType::PZ); + ZXVert pb = diag.add_vertex(ZXType::PX); + ZXVert pc = diag.add_vertex(ZXType::XY, 0.2); + ZXVert pd = diag.add_vertex(ZXGen::create_gen(ZXType::PY, true)); + diag.add_wire(gc, pi, ZXWireType::H); + diag.add_wire(pi, pb, ZXWireType::H); + diag.add_wire(pa, pb, ZXWireType::H); + diag.add_wire(pa, pc, ZXWireType::H); + diag.add_wire(pa, pd, ZXWireType::H); + diag.add_wire(pb, pd, ZXWireType::H); + diag.add_wire(pc, pd, ZXWireType::H); + diag.add_wire(pc, outs.at(1), ZXWireType::H); + diag.add_wire(pd, outs.at(2), ZXWireType::H); + + Flow f = Flow::identify_pauli_flow(diag); + + REQUIRE_NOTHROW(f.verify(diag)); + REQUIRE_NOTHROW(f.focus(diag)); + REQUIRE_NOTHROW(f.verify(diag)); +} + +SCENARIO("Test focussed set identificaiton") { + // Diagram combines Ex. 2.43, "There and back again: a circuit extraction + // tale", Backens et al. 2021 and Ex. C.13, "Relating measurement patterns to + // circuits via Pauli flow", Simmons 2021 + ZXDiagram diag(1, 3, 0, 0); + ZXVertVec ins = diag.get_boundary(ZXType::Input); + ZXVertVec outs = diag.get_boundary(ZXType::Output); + // Gflow example from Backens et al. + ZXVert ga = diag.add_vertex(ZXType::XY, 0.3); + ZXVert gb = diag.add_vertex(ZXType::XY, 0.7); + ZXVert gc = diag.add_vertex(ZXType::XZ, 1.4); + ZXVert gd = diag.add_vertex(ZXType::YZ, 0.9); + diag.add_wire(ins.at(0), ga); + diag.add_wire(ga, gb, ZXWireType::H); + diag.add_wire(gb, gc, ZXWireType::H); + diag.add_wire(gb, gd, ZXWireType::H); + diag.add_wire(gc, gd, ZXWireType::H); + diag.add_wire(gb, outs.at(0), ZXWireType::H); + // Pauli flow example from Simmons (angles cut to Paulis) + ZXVert pi = diag.add_vertex(ZXType::XY, 0.9); + ZXVert pa = diag.add_vertex(ZXType::PZ); + ZXVert pb = diag.add_vertex(ZXType::PX); + ZXVert pc = diag.add_vertex(ZXType::XY, 0.2); + ZXVert pd = diag.add_vertex(ZXGen::create_gen(ZXType::PY, true)); + diag.add_wire(gc, pi, ZXWireType::H); + diag.add_wire(pi, pb, ZXWireType::H); + diag.add_wire(pa, pb, ZXWireType::H); + diag.add_wire(pa, pc, ZXWireType::H); + diag.add_wire(pa, pd, ZXWireType::H); + diag.add_wire(pb, pd, ZXWireType::H); + diag.add_wire(pc, pd, ZXWireType::H); + diag.add_wire(pc, outs.at(1), ZXWireType::H); + diag.add_wire(pd, outs.at(2), ZXWireType::H); + + std::set focussed = Flow::identify_focussed_sets(diag); + + REQUIRE(focussed.size() == 2); + for (const ZXVertSeqSet& fset : focussed) { + std::map parities; + for (const ZXVert& v : fset.get()) { + ZXType vtype = diag.get_zxtype(v); + REQUIRE( + (vtype == ZXType::Output || vtype == ZXType::XY || + vtype == ZXType::PX || vtype == ZXType::PY)); + for (const ZXVert& n : fset.get()) { + auto inserted = parities.insert({n, 1}); + if (!inserted.second) { + ++(inserted.first->second); + } + } + } + for (const std::pair& p : parities) { + if (p.second % 2 == 1) { + ZXType vtype = diag.get_zxtype(p.first); + REQUIRE(( + vtype == ZXType::Output || vtype == ZXType::XZ || + vtype == ZXType::YZ || vtype == ZXType::PY || vtype == 
ZXType::PZ)); + REQUIRE( + (vtype != ZXType::PY || + fset.get().find(p.first) != fset.get().end())); + } + } + } +} + +} // namespace test_flow + +} // namespace zx + +} // namespace tket diff --git a/tket/tests/ZX/test_ZXDiagram.cpp b/tket/tests/ZX/test_ZXDiagram.cpp index 1d10149b7c..37a15f4c7e 100644 --- a/tket/tests/ZX/test_ZXDiagram.cpp +++ b/tket/tests/ZX/test_ZXDiagram.cpp @@ -31,7 +31,7 @@ SCENARIO("Testing generator creation") { CHECK_FALSE(input.valid_edge(0, QuantumType::Quantum)); CHECK_FALSE(input.valid_edge(std::nullopt, QuantumType::Classical)); - BasicGen zSpider(ZXType::ZSpider, 0.3, QuantumType::Classical); + PhasedGen zSpider(ZXType::ZSpider, 0.3, QuantumType::Classical); CHECK(zSpider.get_name() == "C-Z(0.3)"); CHECK(zSpider.get_type() == ZXType::ZSpider); CHECK(zSpider.get_qtype() == QuantumType::Classical); @@ -40,7 +40,7 @@ SCENARIO("Testing generator creation") { CHECK(zSpider.valid_edge(std::nullopt, QuantumType::Classical)); CHECK_FALSE(zSpider.valid_edge(0, QuantumType::Quantum)); - BasicGen xSpider(ZXType::XSpider, Expr("2*a"), QuantumType::Quantum); + PhasedGen xSpider(ZXType::XSpider, Expr("2*a"), QuantumType::Quantum); CHECK(xSpider.get_name() == "Q-X(2*a)"); CHECK(xSpider.get_type() == ZXType::XSpider); CHECK(xSpider.get_qtype() == QuantumType::Quantum); @@ -51,9 +51,20 @@ SCENARIO("Testing generator creation") { Sym a = SymEngine::symbol("a"); sub_map[a] = Expr(0.8); CHECK(xSpider.symbol_substitution(sub_map)->get_name() == "Q-X(1.6)"); + CHECK( + *xSpider.symbol_substitution(sub_map) == + PhasedGen(ZXType::XSpider, 1.6, QuantumType::Quantum)); + + CliffordGen px(ZXType::PX, true, QuantumType::Classical); + CHECK(px.get_name() == "C-X(1)"); + CHECK(px.get_type() == ZXType::PX); + CHECK(px.get_param() == true); + CHECK(px.free_symbols().empty()); + CHECK(!(px == CliffordGen(ZXType::PX, false, QuantumType::Quantum))); + CHECK(px == CliffordGen(ZXType::PX, true, QuantumType::Classical)); // Should throw an error: type Triangle is not a BasicGen type - REQUIRE_THROWS_AS(BasicGen(ZXType::Triangle, 0.3), ZXError); + REQUIRE_THROWS_AS(PhasedGen(ZXType::Triangle, 0.3), ZXError); DirectedGen tri(ZXType::Triangle, QuantumType::Classical); CHECK(tri.get_name() == "C-Tri"); diff --git a/tket/tests/test_AASRoute.cpp b/tket/tests/test_AASRoute.cpp new file mode 100644 index 0000000000..9b75dd97dd --- /dev/null +++ b/tket/tests/test_AASRoute.cpp @@ -0,0 +1,877 @@ +#include +#include + +#include "Circuit/Circuit.hpp" +#include "Mapping/AASLabelling.hpp" +#include "Mapping/AASRoute.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "OpType/OpType.hpp" +#include "OpType/OpTypeFunctions.hpp" +#include "Predicates/CompilationUnit.hpp" +#include "Predicates/CompilerPass.hpp" +#include "Predicates/PassGenerators.hpp" +#include "Predicates/PassLibrary.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" +#include "Transformations/ContextualReduction.hpp" +#include "testutil.hpp" + +namespace tket { +SCENARIO("Test aas route in RV3") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7), Node("node_test", 8), + Node("node_test", 9), Node("node_test", 10)}; + + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + 
{nodes[5], nodes[6]}, + {nodes[4], nodes[7]}, + {nodes[7], nodes[8]}, + {nodes[8], nodes[9]}, + {nodes[9], nodes[10]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + GIVEN( + "AASRoute - test AASRouteRoutingMethod routing_method placed and gates") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + circ.rename_units(rename_map); + + std::shared_ptr mf = + std::make_shared(circ); + + AASRouteRoutingMethod aasrm(1, aas::CNotSynthType::Rec); + + // this will fail because the cx ae in fron of the ppb + REQUIRE(!aasrm.routing_method(mf, shared_arc).first); + } + GIVEN("AASRoute - test AASRouteRoutingMethod routing_method placed") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + Circuit ppb_circ(11); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + ppb_circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + ppb_circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + ppb_circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.rename_units(rename_map); + + std::shared_ptr mf = + std::make_shared(circ); + + AASRouteRoutingMethod aasrm(1, aas::CNotSynthType::Rec); + + REQUIRE(aasrm.routing_method(mf, shared_arc).first); + } + GIVEN("AASRoute - test AASRouteRoutingMethod routing_method unplaced") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + Circuit ppb_circ(11); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + ppb_circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + ppb_circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + ppb_circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + std::shared_ptr mf = + std::make_shared(circ); + + AASRouteRoutingMethod aasrm(1, aas::CNotSynthType::Rec); + + // this will fail because of the unplaced qubits + REQUIRE(!aasrm.routing_method(mf, shared_arc).first); + } + GIVEN("AASRouteRoutingMethod - test routing_method I") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), Node("node_test", 2)}; + + 
Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + ArchitecturePtr shared_arc_mixed = + std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + PhasePolyBox ppbox(ppb_circ); + + circ.add_box(ppbox, qubits); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + + // testing this without interacting with the lexilabelling or aas labelling + circ.rename_units(rename_map); + + /*std::vector qubits_after_rename = circ.all_qubits(); + + for (const Command& com : circ) { + OpType ot = com.get_op_ptr()->get_type(); + unit_vector_t qbs = com.get_args(); + switch (ot) { + case OpType::PhasePolyBox: { + Op_ptr op = com.get_op_ptr(); + const PhasePolyBox& ppb = static_cast(*op); + Circuit circuit_ppb_place(*ppb.to_circuit()); + } + } + } + + Circuit circ_flatt_copy(circ); + + circ_flatt_copy.flatten_registers(); + + std::vector qubits_after_flatten = circ_flatt_copy.all_qubits(); + + for (const Command& com : circ_flatt_copy) { + OpType ot = com.get_op_ptr()->get_type(); + unit_vector_t qbs = com.get_args(); + switch (ot) { + case OpType::PhasePolyBox: { + Op_ptr op = com.get_op_ptr(); + const PhasePolyBox& ppb = static_cast(*op); + Circuit circuit_ppb_place(*ppb.to_circuit()); + } + } + }*/ + + Circuit circ_copy(circ); + + std::shared_ptr mf = + std::make_shared(circ); + + AASRouteRoutingMethod aasrm(1, aas::CNotSynthType::Rec); + + REQUIRE(aasrm.routing_method(mf, shared_arc_mixed).first); + // aasrm.routing_method(mf, shared_arc_mixed); + + REQUIRE(test_unitary_comparison(mf->circuit_, circ_copy)); + + PredicatePtr routed_correctly = + std::make_shared(architecture_mixed); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu0(mf->circuit_, preds); + REQUIRE(cu0.check_all_predicates()); + } + GIVEN("AASRouteRoutingMethod - test routing_method II") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), Node("node_test", 2)}; + + Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + + ArchitecturePtr shared_arc_mixed = + std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + ppb_circ.add_op(OpType::Rz, 0.22, {qubits[0]}); + ppb_circ.add_op(OpType::Rz, 0.33, {qubits[1]}); + ppb_circ.add_op(OpType::Rz, 0.55, {qubits[2]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + + circ.rename_units(rename_map); + + /*std::vector qubits_after_rename = circ.all_qubits(); + + for (const Command& com : circ) { + OpType ot = com.get_op_ptr()->get_type(); + unit_vector_t qbs = com.get_args(); + switch (ot) { + case OpType::PhasePolyBox: { + Op_ptr op = com.get_op_ptr(); + const PhasePolyBox& ppb = static_cast(*op); + Circuit circuit_ppb_place(*ppb.to_circuit()); + } + } + } + + Circuit circ_flatt_copy(circ); + + circ_flatt_copy.flatten_registers(); + + std::vector qubits_after_flatten = circ_flatt_copy.all_qubits(); + + for (const Command& com : circ_flatt_copy) { + OpType ot = 
com.get_op_ptr()->get_type(); + unit_vector_t qbs = com.get_args(); + switch (ot) { + case OpType::PhasePolyBox: { + Op_ptr op = com.get_op_ptr(); + const PhasePolyBox& ppb = static_cast(*op); + Circuit circuit_ppb_place(*ppb.to_circuit()); + } + } + }*/ + + Circuit circ_copy(circ); + + std::shared_ptr mf = + std::make_shared(circ); + + AASRouteRoutingMethod aasrm(1, aas::CNotSynthType::Rec); + + REQUIRE(aasrm.routing_method(mf, shared_arc_mixed).first); + // aasrm.routing_method(mf, shared_arc_mixed); + + REQUIRE(test_unitary_comparison(mf->circuit_, circ_copy)); + + PredicatePtr routed_correctly = + std::make_shared(architecture_mixed); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu0(mf->circuit_, preds); + REQUIRE(cu0.check_all_predicates()); + } + GIVEN("AASRouteRoutingMethod - test routing_method III") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + Circuit ppb_circ(11); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + ppb_circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + ppb_circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + ppb_circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + std::shared_ptr mf = + std::make_shared(circ); + + AASRouteRoutingMethod aasrm(1, aas::CNotSynthType::Rec); + + REQUIRE(aasrm.routing_method(mf, shared_arc).first); + + // aasrm.routing_method(mf, shared_arc); + + REQUIRE(test_unitary_comparison(mf->circuit_, circ_copy)); + + PredicatePtr routed_correctly = + std::make_shared(architecture); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu0(mf->circuit_, preds); + REQUIRE(cu0.check_all_predicates()); + } + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod I") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), Node("node_test", 2)}; + + Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + + ArchitecturePtr shared_arc_mixed = + std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc_mixed); + + std::vector vrm = { + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(100), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture_mixed); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + REQUIRE(test_unitary_comparison(circ, circ_copy)); + REQUIRE(circ.n_gates() == 4); + } + + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod II") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), 
Node("node_test", 2)}; + + Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + + ArchitecturePtr shared_arc_mixed = + std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc_mixed); + + std::vector vrm = { + std::make_shared(1), + std::make_shared(100), + }; + + mm.route_circuit(circ, vrm); + } + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod III") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), Node("node_test", 2)}; + + Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + + ArchitecturePtr shared_arc_mixed = + std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc_mixed); + + std::vector vrm = { + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture_mixed); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + REQUIRE(test_unitary_comparison(circ, circ_copy)); + REQUIRE(circ.n_gates() == 1); + } + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod IV") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), Node("node_test", 2)}; + + Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + + ArchitecturePtr shared_arc_mixed = + std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc_mixed); + + std::vector vrm = { + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture_mixed); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + REQUIRE(test_unitary_comparison(circ, circ_copy)); + REQUIRE(circ.n_gates() == 4); + } + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod V") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), Node("node_test", 2)}; + + Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + + ArchitecturePtr shared_arc_mixed = + 
std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc_mixed); + + std::vector vrm = { + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + std::make_shared(100), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture_mixed); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + REQUIRE(test_unitary_comparison(circ, circ_copy)); + REQUIRE(circ.n_gates() == 4); + } + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod VI") { + std::vector nodes_mixed = { + Node("node_test", 0), Node("test_node", 1), Node("node_test", 2)}; + + Architecture architecture_mixed( + {{nodes_mixed[0], nodes_mixed[1]}, {nodes_mixed[1], nodes_mixed[2]}}); + + ArchitecturePtr shared_arc_mixed = + std::make_shared(architecture_mixed); + + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes_mixed[0]}, + {qubits[1], nodes_mixed[1]}, + {qubits[2], nodes_mixed[2]}}; + + Circuit ppb_circ(3); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc_mixed); + + std::vector vrm = { + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + std::make_shared(100), + std::make_shared(), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture_mixed); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + REQUIRE(test_unitary_comparison(circ, circ_copy)); + REQUIRE(circ.n_gates() == 4); + } + + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod VII") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + Circuit ppb_circ(11); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + ppb_circ.add_op(OpType::CX, {qubits[2], qubits[5]}); + ppb_circ.add_op(OpType::CX, {qubits[5], qubits[4]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc); + + std::vector vrm = { + std::make_shared(), + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + std::make_shared(100), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + + // REQUIRE(test_unitary_comparison(circ, circ_copy)); + // will fail because of the 
inserted swaps + REQUIRE(circ.n_gates() == 18); + REQUIRE(circ.count_gates(OpType::CX) == 15); + REQUIRE(circ.count_gates(OpType::SWAP) == 3); + } + GIVEN("AASRouteRoutingMethod and LexiRouteRoutingMethod, only route") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + Circuit ppb_circ(11); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + ppb_circ.add_op(OpType::CX, {qubits[2], qubits[5]}); + ppb_circ.add_op(OpType::CX, {qubits[5], qubits[4]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc); + + std::vector vrm = { + std::make_shared(), + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + std::make_shared(100), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + + REQUIRE(circ.n_gates() == 61); + REQUIRE(circ.count_gates(OpType::CX) == 37); + REQUIRE(circ.count_gates(OpType::SWAP) == 21); + } + GIVEN( + "AASRouteRoutingMethod and LexiRouteRoutingMethod only route two " + "boxes") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + Circuit ppb_circ(11); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + ppb_circ.add_op(OpType::CX, {qubits[2], qubits[5]}); + ppb_circ.add_op(OpType::CX, {qubits[7], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[8], qubits[4]}); + 
ppb_circ.add_op(OpType::CX, {qubits[9], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + Circuit ppb_circ_2(11); + + ppb_circ_2.add_op(OpType::CX, {qubits[0], qubits[2]}); + ppb_circ_2.add_op(OpType::CX, {qubits[2], qubits[5]}); + ppb_circ_2.add_op(OpType::CX, {qubits[5], qubits[4]}); + + PhasePolyBox ppbox2(ppb_circ_2); + circ.add_box(ppbox2, qubits); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + circ.rename_units(rename_map); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc); + + std::vector vrm = { + std::make_shared(), + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + std::make_shared(100), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + + REQUIRE(circ.n_gates() == 115); + REQUIRE(circ.count_gates(OpType::CX) == 89); + REQUIRE(circ.count_gates(OpType::SWAP) == 24); + } + GIVEN("AAS + Lexi, Label and Route") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + + Circuit ppb_circ(11); + + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + ppb_circ.add_op(OpType::CX, {qubits[2], qubits[5]}); + ppb_circ.add_op(OpType::CX, {qubits[7], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[8], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[9], qubits[4]}); + ppb_circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + + PhasePolyBox ppbox(ppb_circ); + circ.add_box(ppbox, qubits); + + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + Circuit ppb_circ_2(11); + + ppb_circ_2.add_op(OpType::CX, {qubits[0], qubits[2]}); + ppb_circ_2.add_op(OpType::CX, {qubits[2], qubits[5]}); + ppb_circ_2.add_op(OpType::CX, {qubits[5], qubits[4]}); + + PhasePolyBox ppbox2(ppb_circ_2); + circ.add_box(ppbox2, qubits); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], 
qubits[0]}); + + Circuit circ_copy(circ); + + MappingManager mm(shared_arc); + + std::vector vrm = { + std::make_shared(), + std::make_shared(1, aas::CNotSynthType::Rec), + std::make_shared(), + std::make_shared(100), + }; + + mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = + std::make_shared(architecture); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + REQUIRE(cu.check_all_predicates()); + + REQUIRE(circ.n_gates() == 275); + REQUIRE(circ.count_gates(OpType::CX) == 244); + REQUIRE(circ.count_gates(OpType::SWAP) == 28); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_ArchitectureAwareSynthesis.cpp b/tket/tests/test_ArchitectureAwareSynthesis.cpp new file mode 100644 index 0000000000..e6ca39ece5 --- /dev/null +++ b/tket/tests/test_ArchitectureAwareSynthesis.cpp @@ -0,0 +1,372 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "Architecture/Architecture.hpp" +#include "Predicates/CompilerPass.hpp" +#include "Predicates/PassGenerators.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" +#include "testutil.hpp" + +namespace tket { +using Connection = Architecture::Connection; +SCENARIO("Routing of aas example") { + GIVEN("aas routing - simple example") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example II") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + 
circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example III") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example IV") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.3, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example V") { + Architecture arc(std::vector{{Node(0), Node(1)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(2); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); 
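+    // As in the other examples: run the phase-poly mapping pass and require
+    // that the routed result implements the same unitary as the original.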
+ + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example VI") { + Architecture arc(std::vector{{Node(0), Node(2)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(2); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + + const auto s = tket_sim::get_unitary(circ); + const auto s1 = tket_sim::get_unitary(result); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + s, s1, tket_sim::MatrixEquivalence::EQUAL)); + } + GIVEN("aas routing - simple example VII") { + Architecture arc(std::vector{ + {Node(0), Node(2)}, {Node(2), Node(4)}, {Node(4), Node(6)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + + const auto s = tket_sim::get_unitary(circ); + const auto s1 = tket_sim::get_unitary(result); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + s, s1, tket_sim::MatrixEquivalence::EQUAL)); + } + GIVEN("aas routing - simple example VIII") { + Architecture arc(std::vector{ + {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::H, {2}); + circ.add_op(OpType::H, {3}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - simple example IX, other gate set") { + Architecture arc(std::vector{ + {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(4); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + circ.add_op(OpType::X, {3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::Rz, 0.1, {2}); + circ.add_op(OpType::Rz, 0.1, {3}); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + 
circ.add_op(OpType::X, {2}); + circ.add_op(OpType::X, {3}); + + CompilationUnit cu(circ); + + REQUIRE(pass->apply(cu)); + + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing with measure") { + Architecture arc(std::vector{{Node(0), Node(2)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(2, 2); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.1, {1}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::H, {1}); + for (unsigned mes = 0; mes < 2; ++mes) { + circ.add_measure(mes, mes); + } + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + } + GIVEN("aas routing - circuit with fewer qubits then nodes in the arch") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(3); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.2, {1}); + circ.add_op(OpType::Rz, 0.3, {2}); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } + GIVEN("aas routing - circuit with fewer qubits then nodes in the arch II") { + Architecture arc(std::vector{ + {Node(0), Node(1)}, + {Node(1), Node(2)}, + {Node(2), Node(3)}, + {Node(3), Node(4)}}); + PassPtr pass = gen_full_mapping_pass_phase_poly(arc); + Circuit circ(3); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::Rz, 0.1, {0}); + circ.add_op(OpType::Rz, 0.2, {1}); + circ.add_op(OpType::Rz, 0.3, {2}); + circ.add_op(OpType::X, {0}); + circ.add_op(OpType::X, {1}); + circ.add_op(OpType::X, {2}); + + CompilationUnit cu(circ); + REQUIRE(pass->apply(cu)); + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(circ, result)); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_BoxDecompRoutingMethod.cpp b/tket/tests/test_BoxDecompRoutingMethod.cpp new file mode 100644 index 0000000000..00d4b19323 --- /dev/null +++ b/tket/tests/test_BoxDecompRoutingMethod.cpp @@ -0,0 +1,148 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
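+
+// Tests for BoxDecomposition and BoxDecompositionRoutingMethod: boxes
+// (including conditional ones) in a placed circuit are decomposed so that no
+// box operations remain, routing still satisfies the connectivity predicate,
+// and the routing method round-trips through JSON serialisation.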
+#include + +#include "Mapping/BoxDecomposition.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Predicates/Predicates.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" + +namespace tket { +SCENARIO("Decompose boxes") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + Eigen::Matrix4cd m; + m << 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0; + Unitary2qBox ubox(m); + + GIVEN("A box") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + + circ.add_box(ubox, {0, 2}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + MappingFrontier_ptr mf = std::make_shared(circ); + BoxDecomposition bd(shared_arc, mf); + bd.solve(); + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + REQUIRE(!c.get_op_ptr()->get_desc().is_box()); + } + } + + GIVEN("A conditional box") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + Conditional cond(std::make_shared(ubox), 1, 1); + circ.add_op( + std::make_shared(cond), {Bit(0), Qubit(0), Qubit(1)}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + BoxDecomposition bd(shared_arc, mf); + bd.solve(); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + Op_ptr op = c.get_op_ptr(); + REQUIRE( + !(op->get_desc().is_box() || (op->get_type() == OpType::Conditional && + static_cast(*op) + .get_op() + ->get_desc() + .is_box()))); + } + } + + GIVEN("Test BoxDecompositionRoutingMethod") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + circ.add_box(ubox, {0, 3}); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_box(ubox, {1, 3}); + circ.add_box(ubox, {0, 1}); + circ.add_op(OpType::X, {qubits[1]}); + circ.add_op(OpType::Measure, {0, 0}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + MappingManager mm(shared_arc); + std::vector vrm = { + + std::make_shared(10), + std::make_shared()}; + bool res = mm.route_circuit(circ, vrm); + REQUIRE(res); + PredicatePtr routed_correctly = + std::make_shared(architecture); + REQUIRE(routed_correctly->verify(circ)); + std::vector commands = mf->circuit_.get_commands(); + for (Command c : commands) { + REQUIRE(!c.get_op_ptr()->get_desc().is_box()); + } + } +} + +SCENARIO("Test JSON serialisation for BoxDecompositionRoutingMethod") { + GIVEN("BoxDecompositionRoutingMethod") { + nlohmann::json j_rm; + j_rm["name"] = "BoxDecompositionRoutingMethod"; + BoxDecompositionRoutingMethod rm_loaded = + BoxDecompositionRoutingMethod::deserialize(j_rm); + nlohmann::json j_rm_serialised = 
rm_loaded.serialize(); + REQUIRE(j_rm == j_rm_serialised); + } + + GIVEN("BoxDecompositionRoutingMethod vector") { + nlohmann::json j_rms = { + {{"name", "BoxDecompositionRoutingMethod"}}, + { + {"name", "LexiRouteRoutingMethod"}, + {"depth", 3}, + }}; + std::vector rms = + j_rms.get>(); + nlohmann::json j_rms_serialised = rms; + REQUIRE(j_rms == j_rms_serialised); + } +} + +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_CompilerPass.cpp b/tket/tests/test_CompilerPass.cpp index 124077c896..875fed5251 100644 --- a/tket/tests/test_CompilerPass.cpp +++ b/tket/tests/test_CompilerPass.cpp @@ -15,14 +15,17 @@ #include #include +#include "Circuit/CircPool.hpp" #include "Circuit/Circuit.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" #include "OpType/OpType.hpp" #include "OpType/OpTypeFunctions.hpp" +#include "Placement/Placement.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/CompilerPass.hpp" #include "Predicates/PassGenerators.hpp" #include "Predicates/PassLibrary.hpp" -#include "Routing/Placement.hpp" #include "Simulation/CircuitSimulator.hpp" #include "Simulation/ComparisonFunctions.hpp" #include "Transformations/ContextualReduction.hpp" @@ -102,13 +105,7 @@ SCENARIO("Test that qubits added via add_qubit are tracked.") { circ.add_qubit(weird_qb2); circ.add_bit(weird_cb); - unit_bimap_t* ubmap_initial_missing = circ.unit_bimaps_.initial; - REQUIRE(!ubmap_initial_missing); - CompilationUnit cu(circ); - // At initialisation, circuit bimaps are set to nullptr - unit_bimap_t* ubmap_initial = circ.unit_bimaps_.initial; - REQUIRE(!ubmap_initial); // circuit bimaps property wont be changed, nor will compilation unit circ.add_qubit(weird_qb3); @@ -118,11 +115,16 @@ SCENARIO("Test that qubits added via add_qubit are tracked.") { REQUIRE(it == cu_initial.left.end()); // Instead add transform for running it - Transform t = Transform([](Circuit& circ) { - Qubit weird_qb4("weird_qb", 9); - circ.add_qubit(weird_qb4); - return true; - }); + Transform t = + Transform([](Circuit& circ, std::shared_ptr maps) { + Qubit weird_qb4("weird_qb", 9); + circ.add_qubit(weird_qb4); + if (maps) { + maps->initial.left.insert({weird_qb4, weird_qb4}); + maps->final.left.insert({weird_qb4, weird_qb4}); + } + return true; + }); // convert to pass PredicatePtrMap s_ps; @@ -149,7 +151,7 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { GIVEN("Correct pass for Predicate") { SquareGrid grid(1, 5); - PassPtr cp_route = gen_default_mapping_pass(grid); + PassPtr cp_route = gen_default_mapping_pass(grid, false); Circuit circ(5); add_2qb_gates(circ, OpType::CX, {{0, 1}, {0, 2}, {0, 3}, {1, 2}, {3, 4}}); @@ -170,7 +172,7 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { GIVEN("Incorrect pass for Predicate logs a warning") { SquareGrid grid(2, 3); - PassPtr cp_route = gen_default_mapping_pass(grid); + PassPtr cp_route = gen_default_mapping_pass(grid, false); Circuit circ(6); add_2qb_gates(circ, OpType::CX, {{0, 1}, {0, 5}, {0, 3}, {1, 2}, {3, 4}}); @@ -212,13 +214,13 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { CompilationUnit::make_type_pair(gsp)}; CompilationUnit cu(circ, preds); - PassPtr cp_route = gen_default_mapping_pass(grid); + PassPtr cp_route = gen_default_mapping_pass(grid, false); Circuit cx(2); cx.add_op(OpType::CX, {0, 1}); PassPtr pz_rebase = gen_rebase_pass( - {OpType::CX}, cx, {OpType::PhasedX, OpType::Rz}, - Transforms::tk1_to_PhasedXRz); + {OpType::CX, 
OpType::PhasedX, OpType::Rz}, cx, + CircPool::tk1_to_PhasedXRz); PassPtr all_passes = SynthesiseTket() >> cp_route >> pz_rebase; REQUIRE(all_passes->apply(cu)); @@ -234,12 +236,13 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { } } GIVEN("Synthesise Passes in a row then routing") { - Circuit circ(4); + Circuit circ(5); circ.add_op(OpType::H, {0}); circ.add_op(OpType::CZ, {0, 1}); circ.add_op(OpType::CH, {0, 2}); circ.add_op(OpType::CnX, {0, 1, 2, 3}); circ.add_op(OpType::CZ, {0, 1}); + circ.add_op(OpType::X, {4}); OpTypeSet ots = {OpType::CX, OpType::TK1, OpType::SWAP}; PredicatePtr gsp = std::make_shared(ots); SquareGrid grid(2, 3); @@ -253,7 +256,10 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { CompilationUnit cu(circ, preds); PlacementPtr pp = std::make_shared(grid); - PassPtr cp_route = gen_full_mapping_pass(grid, pp, {50, 0, 0, 0}); + PassPtr cp_route = gen_full_mapping_pass( + grid, pp, + {std::make_shared(), + std::make_shared()}); PassPtr all_passes = SynthesiseHQS() >> SynthesiseOQC() >> SynthesiseUMD() >> SynthesiseTket() >> cp_route; @@ -381,7 +387,7 @@ SCENARIO("Test making (mostly routing) passes using PassGenerators") { GIVEN("Full compilation sequence") { SquareGrid grid(1, 5); std::vector passes = { - DecomposeBoxes(), RebaseTket(), gen_default_mapping_pass(grid)}; + DecomposeBoxes(), RebaseTket(), gen_default_mapping_pass(grid, true)}; REQUIRE_NOTHROW(SequencePass(passes)); } } @@ -493,7 +499,7 @@ SCENARIO("Track initial and final maps throughout compilation") { CompilationUnit cu(circ); SquareGrid grid(2, 3); - PassPtr cp_route = gen_default_mapping_pass(grid); + PassPtr cp_route = gen_default_mapping_pass(grid, false); cp_route->apply(cu); bool ids_updated = true; for (auto pair : cu.get_initial_map_ref().left) { @@ -797,7 +803,7 @@ SCENARIO("rebase and decompose PhasePolyBox test") { Circuit result = cu.get_circ_ref(); REQUIRE(test_unitary_comparison(result, circ)); - REQUIRE(!NoWireSwapsPredicate().verify(result)); + REQUIRE(NoWireSwapsPredicate().verify(result)); } GIVEN("NoWireSwapsPredicate for ComposePhasePolyBoxes II") { Circuit circ(5); @@ -817,7 +823,73 @@ SCENARIO("rebase and decompose PhasePolyBox test") { Circuit result = cu.get_circ_ref(); REQUIRE(test_unitary_comparison(result, circ)); - REQUIRE(!NoWireSwapsPredicate().verify(result)); + REQUIRE(NoWireSwapsPredicate().verify(result)); + } + GIVEN("NoWireSwapsPredicate for ComposePhasePolyBoxes III") { + Circuit circ(5); + add_2qb_gates(circ, OpType::CX, {{0, 3}, {1, 4}}); + circ.add_op(OpType::SWAP, {3, 4}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Z, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::SWAP, {0, 1}); + circ.add_op(OpType::CX, {1, 4}); + circ.add_op(OpType::Z, {4}); + circ.add_op(OpType::CX, {1, 4}); + circ.add_op(OpType::SWAP, {1, 2}); + circ.add_op(OpType::SWAP, {2, 3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + + REQUIRE(NoWireSwapsPredicate().verify(circ)); + circ.replace_SWAPs(); + REQUIRE(!NoWireSwapsPredicate().verify(circ)); + + CompilationUnit cu(circ); + REQUIRE(ComposePhasePolyBoxes()->apply(cu)); + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(result, circ)); + REQUIRE(NoWireSwapsPredicate().verify(result)); + } + GIVEN("NoWireSwapsPredicate for aas I") { + std::vector nodes = {Node(0), Node(1), Node(2), Node(3), Node(4)}; + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + 
{nodes[2], nodes[3]}, + {nodes[3], nodes[4]}}); + + Circuit circ(5); + add_2qb_gates(circ, OpType::CX, {{0, 3}, {1, 4}}); + circ.add_op(OpType::SWAP, {3, 4}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::Z, {3}); + circ.add_op(OpType::CX, {2, 3}); + circ.add_op(OpType::SWAP, {0, 1}); + circ.add_op(OpType::CX, {1, 4}); + circ.add_op(OpType::Z, {4}); + circ.add_op(OpType::CX, {1, 4}); + circ.add_op(OpType::SWAP, {1, 2}); + circ.add_op(OpType::SWAP, {2, 3}); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::CX, {1, 2}); + circ.add_op(OpType::CX, {2, 3}); + + REQUIRE(NoWireSwapsPredicate().verify(circ)); + circ.replace_SWAPs(); + REQUIRE(!NoWireSwapsPredicate().verify(circ)); + + CompilationUnit cu(circ); + + REQUIRE(gen_full_mapping_pass_phase_poly( + architecture, 1, aas::CNotSynthType::Rec) + ->apply(cu)); + Circuit result = cu.get_circ_ref(); + + REQUIRE(test_unitary_comparison(result, circ)); + REQUIRE(NoWireSwapsPredicate().verify(result)); } } @@ -833,7 +905,9 @@ SCENARIO("DecomposeArbitrarilyControlledGates test") { SCENARIO("Precomposed passes successfully compose") { GIVEN("gen_directed_cx_routing_pass") { RingArch arc(6); - REQUIRE_NOTHROW(gen_directed_cx_routing_pass(arc)); + REQUIRE_NOTHROW(gen_directed_cx_routing_pass( + arc, {std::make_shared(), + std::make_shared()})); } } @@ -854,7 +928,9 @@ SCENARIO("Test Pauli Graph Synthesis Pass") { SCENARIO("Compose Pauli Graph synthesis Passes") { RingArch arc(10); - PassPtr dir_pass = gen_directed_cx_routing_pass(arc); + PassPtr dir_pass = gen_directed_cx_routing_pass( + arc, {std::make_shared(), + std::make_shared()}); GIVEN("Special UCC Synthesis") { PassPtr spec_ucc = gen_special_UCC_synthesis(); REQUIRE_NOTHROW(spec_ucc >> dir_pass); @@ -937,14 +1013,17 @@ SCENARIO("Commute measurements to the end of a circuit") { Architecture line({{0, 1}, {1, 2}, {2, 3}}); PlacementPtr pp = std::make_shared(line); - PassPtr route_pass = gen_full_mapping_pass(line, pp); + PassPtr route_pass = gen_full_mapping_pass( + line, pp, + {std::make_shared(), + std::make_shared()}); CompilationUnit cu(test); route_pass->apply(cu); REQUIRE(delay_pass->apply(cu)); Command final_command = cu.get_circ_ref().get_commands()[7]; OpType type = final_command.get_op_ptr()->get_type(); REQUIRE(type == OpType::Measure); - REQUIRE(final_command.get_args().front() == Node(1)); + REQUIRE(final_command.get_args().front() == Node(3)); } } @@ -981,8 +1060,9 @@ SCENARIO("CX mapping pass") { PlacementPtr placer = std::make_shared(line); Circuit cx(2); cx.add_op(OpType::CX, {0, 1}); - PassPtr rebase = gen_rebase_pass( - {OpType::CX}, cx, all_single_qubit_types(), Transforms::tk1_to_tk1); + OpTypeSet gateset = all_single_qubit_types(); + gateset.insert(OpType::CX); + PassPtr rebase = gen_rebase_pass(gateset, cx, CircPool::tk1_to_tk1); // Circuit mapping basis states to basis states Circuit c(3); @@ -1006,8 +1086,13 @@ SCENARIO("CX mapping pass") { REQUIRE(is_classical_map(c_placed)); // Route + LexiRouteRoutingMethod lrrm(50); + RoutingMethodPtr rmw = std::make_shared(lrrm); CompilationUnit cu_route(c_placed); - gen_routing_pass(line)->apply(cu_route); + gen_routing_pass( + line, {std::make_shared(), + std::make_shared()}) + ->apply(cu_route); const Circuit& c_routed = cu_route.get_circ_ref(); // Rebase again diff --git a/tket/tests/test_LexiRoute.cpp b/tket/tests/test_LexiRoute.cpp new file mode 100644 index 0000000000..91ec62a7c5 --- /dev/null +++ b/tket/tests/test_LexiRoute.cpp @@ -0,0 +1,1585 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/Verification.hpp" +#include "Placement/Placement.hpp" +#include "Predicates/CompilationUnit.hpp" +#include "Predicates/CompilerPass.hpp" +#include "Predicates/PassGenerators.hpp" +#include "Predicates/PassLibrary.hpp" +#include "Transformations/Decomposition.hpp" +#include "testutil.hpp" + +namespace tket { + +// Checks if the initial/final maps are correct by walking through the circuit +bool check_permutation( + const Circuit& circ, const std::shared_ptr& bimaps) { + // qubits |-> nodes + // qubits get moved with swap gates + unit_bimap_t qubit_map; + for (auto q : circ.all_qubits()) { + qubit_map.left.insert({bimaps->initial.right.find(q)->second, q}); + } + for (const Command& cmd : circ.get_commands()) { + Op_ptr op = cmd.get_op_ptr(); + if (op->get_type() == OpType::SWAP) { + unit_vector_t units = cmd.get_args(); + // swap qubits in qubit_map + auto it0 = qubit_map.right.find(units[0]); + auto it1 = qubit_map.right.find(units[1]); + UnitID q0 = it0->second; + UnitID q1 = it1->second; + qubit_map.right.erase(it1); + qubit_map.right.erase(it0); + qubit_map.left.insert({q1, units[0]}); + qubit_map.left.insert({q0, units[1]}); + } + } + // Check this agrees with the final map + for (auto it = qubit_map.left.begin(); it != qubit_map.left.end(); ++it) { + auto final_it = bimaps->final.left.find(it->first); + if (final_it == bimaps->final.left.end() || + final_it->second != it->second) { + return false; + } + } + return true; +} + +// Checks if the results matches the initial circ after resolving the +// permutations +bool check_permutation_unitary( + Circuit& initial_circ, Circuit& circ, + const std::shared_ptr& maps) { + for (auto me : maps->initial) { + if (me.left != me.right) return false; + } + + // bool found_permutations = true; + while (true) { + bool found_permutations = false; + for (auto me : maps->final) { + if (me.left != me.right) found_permutations = true; + } + if (found_permutations) { + for (auto me : maps->final) { + if (me.left != me.right) { + circ.add_op(OpType::SWAP, {Qubit(me.left), Qubit(me.right)}); + break; + } + } + } else { + return test_unitary_comparison(initial_circ, circ); + } + } + + return true; +} + +void add_swap_tests( + Circuit& circ, std::vector& node_vec, unsigned u0, unsigned u1) { + std::vector qubits_renamed = circ.all_qubits(); + circ.add_op(OpType::SWAP, {qubits_renamed[u0], qubits_renamed[u1]}); + + Node no = node_vec[u0]; + node_vec[u0] = node_vec[u1]; + node_vec[u1] = no; +} + +SCENARIO("Test LexiRoute::solve and LexiRoute::solve_labelling") { + std::vector nodes = {Node("test_node", 0), Node("test_node", 1), + Node("test_node", 2), Node("node_test", 3), + Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7)}; + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], 
nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Single best solution, all qubits labelled.") { + Circuit circ(6); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_op(OpType::CX, {qubits[4], qubits[5]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[6]}, {qubits[5], nodes[5]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + + lr.solve(4); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 4); + Command swap_c = commands[1]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } + GIVEN("Single best solution, one qubit unlabelled.") { + Circuit circ(6); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_op(OpType::CX, {qubits[4], qubits[5]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[5], nodes[5]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf0 = std::make_shared(circ); + LexiRoute lr(shared_arc, mf0); + lr.solve_labelling(); + + REQUIRE(mf0->circuit_.n_gates() == 3); + + rename_map = {{qubits[4], nodes[6]}}; + mf0->circuit_.rename_units(rename_map); + + MappingFrontier_ptr mf1 = std::make_shared(circ); + LexiRoute lr1(shared_arc, mf1); + lr1.solve(4); + + std::vector commands = mf1->circuit_.get_commands(); + Command swap_c = commands[1]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } + GIVEN("Single best solution, one stage of look-ahead required.") { + Circuit circ(8); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[2], qubits[7]}); + // n7 + // | + // n0 -- n1 -- n2 -- n3 -- n4 + // | + // n5 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + + lr.solve(4); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 4); + Command swap_c = commands[0]; + unit_vector_t uids = {nodes[7], nodes[3]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + Command changed_c = commands[3]; + uids = {nodes[2], nodes[3]}; + REQUIRE(changed_c.get_args() == uids); + } + GIVEN("All unlabelled, labelling can give complete solution.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + 
circ.add_op(OpType::CX, {qubits[0], qubits[3]}); + circ.add_op(OpType::CX, {qubits[3], qubits[4]}); + + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRoute lr0(shared_arc, mf); + lr0.solve_labelling(); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 4); + Command c = commands[0]; + unit_vector_t uids = {nodes[2], nodes[1]}; + REQUIRE(c.get_args() == uids); + mf->advance_frontier_boundary(shared_arc); + + LexiRoute lr1(shared_arc, mf); + lr1.solve_labelling(); + uids = {nodes[2], nodes[3]}; + REQUIRE(mf->circuit_.get_commands()[1].get_args() == uids); + mf->advance_frontier_boundary(shared_arc); + + LexiRoute lr2(shared_arc, mf); + lr2.solve_labelling(); + uids = {nodes[2], nodes[5]}; + REQUIRE(mf->circuit_.get_commands()[2].get_args() == uids); + mf->advance_frontier_boundary(shared_arc); + + LexiRoute lr3(shared_arc, mf); + lr3.solve_labelling(); + uids = {nodes[5], nodes[6]}; + REQUIRE(mf->circuit_.get_commands()[3].get_args() == uids); + } + GIVEN("Bridge preferred, CX.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + {qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + Command bridge_c = mf->circuit_.get_commands()[0]; + unit_vector_t uids = {nodes[1], nodes[2], nodes[3]}; + REQUIRE(bridge_c.get_args() == uids); + REQUIRE(*bridge_c.get_op_ptr() == *get_op_ptr(OpType::BRIDGE)); + } + GIVEN("Bridge preferred, CZ.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + {qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + REQUIRE(mf->circuit_.get_commands().size() == 4); + } + GIVEN("Bridge preferred, conditional CX.") { + Circuit circ(5, 1); + std::vector qubits = circ.all_qubits(); + circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0}, 1); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + {qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + REQUIRE(mf->circuit_.get_commands().size() == 4); + } + GIVEN("Bridge preferred, conditional CZ.") { + Circuit circ(5, 1); + std::vector qubits = circ.all_qubits(); + circ.add_conditional_gate(OpType::CZ, {}, {0, 1}, {0}, 1); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[1]}, + {qubits[1], nodes[3]}, + {qubits[2], nodes[0]}, + {qubits[3], nodes[7]}, + {qubits[4], nodes[2]}}; + circ.rename_units(rename_map); + 
MappingFrontier_ptr mf = std::make_shared(circ); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr(shared_arc, mf); + lr.solve(4); + REQUIRE(mf->circuit_.get_commands().size() == 4); + } + + GIVEN("Ancilla assignment, one valid node.") { + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4)}; + // just a ring + + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[4], nodes[0]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + std::map rename_map = { + {qubits[0], nodes[2]}, {qubits[1], nodes[4]}}; + circ.rename_units(rename_map); + + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr0(shared_arc, mf); + lr0.solve(20); + REQUIRE(circ.all_qubits()[1] == nodes[4]); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr1(shared_arc, mf); + lr1.solve_labelling(); + REQUIRE(circ.all_qubits()[0] == nodes[3]); + } + + GIVEN("Ancilla assignment, multiple valid Node.") { + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + + std::vector nodes = {Node("test_node", 0), Node("test_node", 1), + Node("test_node", 2), Node("node_test", 3), + Node("node_test", 4), Node("node_test", 5), + Node("node_test", 6)}; + // A ring, but with two identical length paths where ancilla could be + // assigned + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[2], nodes[5]}, + {nodes[3], nodes[6]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[4]}, + {nodes[5], nodes[4]}, + {nodes[4], nodes[0]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + std::map rename_map = { + {qubits[0], nodes[2]}, {qubits[1], nodes[4]}}; + circ.rename_units(rename_map); + + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr0(shared_arc, mf); + lr0.solve_labelling(); + + mf->advance_frontier_boundary(shared_arc); + LexiRoute lr1(shared_arc, mf); + lr1.solve(20); + + REQUIRE(circ.all_qubits()[1] == nodes[5]); + } + GIVEN("Ancilla assignment, one valid Node, with merge.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::H, {qubits[3]}); + + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4)}; + // just a ring + + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[4], nodes[0]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + std::map rename_map = { + {qubits[0], nodes[2]}, {qubits[1], nodes[4]}, {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier_ptr mf = std::make_shared(circ); + mf->ancilla_nodes_.insert(nodes[3]); + mf->advance_frontier_boundary(shared_arc); + + LexiRoute lr0(shared_arc, mf); + lr0.solve_labelling(); + + REQUIRE(circ.all_qubits()[1] == nodes[4]); + REQUIRE(circ.all_qubits()[0] == nodes[3]); + } + GIVEN( + "Single best 
solution, with measurements and classically controlled " + "gates.") { + Circuit circ(6, 1); + std::vector qubits = circ.all_qubits(); + circ.add_conditional_gate(OpType::CX, {}, {0, 2}, {0}, 1); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + circ.add_conditional_gate(OpType::X, {}, {0}, {0}, 1); + circ.add_op(OpType::Measure, {qubits[1], Bit(0)}); + circ.add_op(OpType::CX, {qubits[4], qubits[5]}); + circ.add_op(OpType::Measure, {qubits[3], Bit(0)}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[6]}, {qubits[5], nodes[5]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + + lr.solve(4); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 7); + Command swap_c = commands[1]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } + GIVEN( + "Labelling is required, but there are no free remaining qubits, for" + " one updated label, order 0.") { + Circuit circ(9); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[1], qubits[8]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); + } + GIVEN( + "Labelling is required, but there are no free remaining qubits, for " + "one updated label, order 1.") { + Circuit circ(9); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[1], qubits[8]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[8], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); + } + GIVEN( + "Labelling is required, but there are no free remaining qubits, for" + "two updated labels.") { + Circuit circ(10); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[9], qubits[8]}); + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRoute lr(shared_arc, mf); + REQUIRE_THROWS_AS(lr.solve_labelling(), LexiRouteError); + } +} + +SCENARIO("Test LexiLabellingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4)}; + + // straight line + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}}); + ArchitecturePtr shared_arc = 
std::make_shared(architecture); + GIVEN("No qubit to label, empty frontier, routing_method false.") { + Circuit circ(5); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.routing_method(mf, shared_arc).first); + } + GIVEN("No qubit to label, partially filled frontier, routing_method false.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.routing_method(mf, shared_arc).first); + } + GIVEN("Qubit to label, but causally restricted, routing_method false.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[0]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.routing_method(mf, shared_arc).first); + } + GIVEN( + "Two Qubit to label in future slice, causally restricted, " + "routing_method false.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + circ.add_op(OpType::ZZPhase, 0.3, {qubits[3], qubits[4]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.routing_method(mf, shared_arc).first); + } + GIVEN("Three Qubit Gate, all labelled, first slice, routing_method false.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CCX, {qubits[1], qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, + {qubits[4], nodes[4]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + LexiLabellingMethod lrm; + REQUIRE(!lrm.routing_method(mf, shared_arc).first); + } + GIVEN("One unlabelled qubit, one slice, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + VertPort pre_label = + mf->linear_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); + REQUIRE( + mf->linear_boundary->get().find(qubits[3]) == + mf->linear_boundary->get().end()); + VertPort post_label = + mf->linear_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label == post_label); + } + GIVEN( + "One unlabelled qubit, two slices, lookahead 
for better solution, check" + " and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + VertPort pre_label = + mf->linear_boundary->get().find(qubits[2])->second; + LexiLabellingMethod lrm; + + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); + REQUIRE( + mf->linear_boundary->get().find(qubits[2]) == + mf->linear_boundary->get().end()); + VertPort post_label = + mf->linear_boundary->get().find(nodes[2])->second; + REQUIRE(pre_label == post_label); + } + GIVEN("Two unlabelled qubits, one slice, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[2], qubits[3]}); + + std::map rename_map = { + {qubits[2], nodes[2]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + VertPort pre_label_0 = + mf->linear_boundary->get().find(qubits[0])->second; + VertPort pre_label_3 = + mf->linear_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); + REQUIRE( + mf->linear_boundary->get().find(qubits[0]) == + mf->linear_boundary->get().end()); + REQUIRE( + mf->linear_boundary->get().find(qubits[3]) == + mf->linear_boundary->get().end()); + VertPort post_label_0 = + mf->linear_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->linear_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } + GIVEN("Two unlabelled qubits, two slices, lookahead, check and route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[2], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[4]}); + + std::map rename_map = { + {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + VertPort pre_label_0 = + mf->linear_boundary->get().find(qubits[2])->second; + VertPort pre_label_3 = + mf->linear_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); + REQUIRE( + mf->linear_boundary->get().find(qubits[2]) == + mf->linear_boundary->get().end()); + REQUIRE( + mf->linear_boundary->get().find(qubits[3]) == + mf->linear_boundary->get().end()); + VertPort post_label_0 = + mf->linear_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->linear_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } + GIVEN( + "Two unlabelled qubits, two slices, lookahead unrouted, check and " + "route.") { + Circuit circ(5); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[2], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.8, {qubits[4], qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[0]}); + + std::map rename_map = { + {qubits[4], nodes[4]}, {qubits[1], nodes[1]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = 
std::make_shared(circ); + VertPort pre_label_0 = + mf->linear_boundary->get().find(qubits[2])->second; + VertPort pre_label_3 = + mf->linear_boundary->get().find(qubits[3])->second; + LexiLabellingMethod lrm; + std::pair out = lrm.routing_method(mf, shared_arc); + REQUIRE(out.first); + REQUIRE( + mf->linear_boundary->get().find(qubits[2]) == + mf->linear_boundary->get().end()); + REQUIRE( + mf->linear_boundary->get().find(qubits[3]) == + mf->linear_boundary->get().end()); + VertPort post_label_0 = + mf->linear_boundary->get().find(nodes[0])->second; + REQUIRE(pre_label_0 == post_label_0); + VertPort post_label_3 = + mf->linear_boundary->get().find(nodes[3])->second; + REQUIRE(pre_label_3 == post_label_3); + } +} +SCENARIO("Test LexiRouteRoutingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7), Node("node_test", 8), + Node("node_test", 9), Node("node_test", 10)}; + // n9 -- n8 -- n10 + // | | + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}, + {nodes[2], nodes[8]}, + {nodes[8], nodes[9]}, + {nodes[8], nodes[10]}, + {nodes[3], nodes[10]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Circuit with all qubits, labelled, stage 0.") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + circ.rename_units(rename_map); + + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRouteRoutingMethod lrrm(100); + std::pair bool_init_map = + lrrm.routing_method(mf, shared_arc); + REQUIRE(bool_init_map.first); + REQUIRE(bool_init_map.second.size() == 0); + + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 9); + Command bridge_c = commands[2]; + unit_vector_t uids = {nodes[8], nodes[2], nodes[5]}; + REQUIRE(bridge_c.get_args() == uids); + REQUIRE(*bridge_c.get_op_ptr() == *get_op_ptr(OpType::BRIDGE)); + } + GIVEN("Circuit with all qubits, labelled, stage 1.") { + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + // n9 -- n8 -- n3 + // | | + // n0 -- n1 -- n2 -- n10 -- n4 + // | | + // n6 n7 + // | + // n5 + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + 
circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[6]}, + {qubits[6], nodes[5]}, {qubits[7], nodes[7]}, {qubits[8], nodes[8]}, + {qubits[9], nodes[9]}, {qubits[10], nodes[10]}}; + circ.rename_units(rename_map); + + MappingFrontier_ptr mf = std::make_shared(circ); + LexiRouteRoutingMethod lrrm(100); + std::pair bool_init_map = + lrrm.routing_method(mf, shared_arc); + REQUIRE(bool_init_map.first); + REQUIRE(bool_init_map.second.size() == 0); + std::vector commands = mf->circuit_.get_commands(); + REQUIRE(commands.size() == 10); + Command swap_c = commands[0]; + unit_vector_t uids = {nodes[3], nodes[10]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } +} +SCENARIO("Test MappingManager with LexiRouteRoutingMethod and LexiLabelling") { + GIVEN("11 Node Architecture, 11 Qubit circuit, multiple SWAP required.") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3), Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7), Node("node_test", 8), + Node("node_test", 9), Node("node_test", 10)}; + // n9 -- n8 -- n10 + // | | + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}, + {nodes[2], nodes[8]}, + {nodes[8], nodes[9]}, + {nodes[8], nodes[10]}, + {nodes[3], nodes[10]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + Circuit circ(11); + std::vector qubits = circ.all_qubits(); + for (unsigned i = 0; i < 11; i++) { + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[1], qubits[10]}); + circ.add_op(OpType::CX, {qubits[8], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[2], qubits[8]}); + + circ.add_op(OpType::CX, {qubits[1], qubits[5]}); + circ.add_op(OpType::CX, {qubits[3], qubits[9]}); + circ.add_op(OpType::CX, {qubits[10], qubits[0]}); + circ.add_op(OpType::CX, {qubits[6], qubits[0]}); + } + + Circuit copy_circ(circ); + // transform stuff + PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(architecture, false); + + MappingManager mm(shared_arc); + MappingFrontier_ptr mf = std::make_shared(copy_circ); + + LexiLabellingMethod lrm; + std::vector vrm = { + std::make_shared(lrm), + std::make_shared()}; + // Contains initial and final map + std::shared_ptr maps = std::make_shared(); + // Initialise the maps by the same way it's done with CompilationUnit + for (const UnitID& u : circ.all_units()) { + maps->initial.insert({u, u}); + maps->final.insert({u, u}); + } + + bool res = mm.route_circuit_with_maps(circ, vrm, maps); + + PredicatePtr routed_correctly = + std::make_shared(architecture); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu0(circ, preds); + dec->apply(cu0); + REQUIRE(res); + REQUIRE(cu0.check_all_predicates()); + REQUIRE(check_permutation(circ, maps)); + } + GIVEN("Square Grid Architecture, large number of gates.") { + SquareGrid sg(5, 10); + ArchitecturePtr shared_arc = std::make_shared(sg); + Circuit circ(35); + std::vector qubits = circ.all_qubits(); + for (unsigned i = 
0; i < qubits.size() - 1; i++) { + circ.add_op(OpType::CX, {qubits[i], qubits[i + 1]}); + } + for (unsigned i = 0; i < qubits.size() - 2; i++) { + circ.add_op(OpType::CZ, {qubits[i], qubits[i + 2]}); + } + // transform stuff + PassPtr dec = gen_decompose_routing_gates_to_cxs_pass(sg, false); + + MappingManager mm(shared_arc); + LexiLabellingMethod lrm; + std::vector vrm = { + std::make_shared(lrm), + std::make_shared()}; + bool res = mm.route_circuit(circ, vrm); + + PredicatePtr routed_correctly = std::make_shared(sg); + PredicatePtrMap preds{CompilationUnit::make_type_pair(routed_correctly)}; + CompilationUnit cu(circ, preds); + dec->apply(cu); + REQUIRE(res); + REQUIRE(cu.check_all_predicates()); + REQUIRE(circ.n_gates() == 88); + } +} + +SCENARIO( + "Check that an already solved routing problem will not add unnecessary " + "swaps") { + GIVEN("A solved problem") { + Circuit test_circuit; + test_circuit.add_blank_wires(4); + add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); + + // Ring of size 4 + RingArch arc(4); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + test_circuit, {std::make_shared(), + std::make_shared()})); + REQUIRE(test_circuit.n_gates() == 4); + } + GIVEN("A solved problem supplied with map and custom architecture") { + Circuit test_circuit; + test_circuit.add_blank_wires(4); + add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); + + Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 0}}); + Placement test_p(test_arc); + + qubit_mapping_t map_; + for (unsigned nn = 0; nn <= 3; ++nn) { + map_[Qubit(nn)] = Node(nn); + } + test_p.place_with_map(test_circuit, map_); + qubit_vector_t all_qs_post_place = test_circuit.all_qubits(); + + MappingManager mm(std::make_shared(test_arc)); + REQUIRE(!mm.route_circuit( + test_circuit, {std::make_shared(), + std::make_shared()})); + + qubit_vector_t all_qs_post_solve = test_circuit.all_qubits(); + REQUIRE(all_qs_post_place == all_qs_post_solve); + REQUIRE(test_circuit.n_gates() == 4); + } +} + +SCENARIO("Empty Circuit test") { + GIVEN("An Empty Circuit") { + Circuit circ; + circ.add_blank_wires(4); + Architecture arc({{0, 1}, {1, 2}, {2, 3}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE(!mm.route_circuit( + circ, + { + std::make_shared(), + std::make_shared(), + }, + false)); + REQUIRE(circ.n_gates() == 0); + } +} + +SCENARIO("Routing on circuit with no multi-qubit gates") { + GIVEN("A circuit with no multi-qubit gates") { + Circuit circ; + circ.add_blank_wires(4); + add_1qb_gates(circ, OpType::X, {0, 2}); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::Y, {1}); + + unsigned orig_vertices = circ.n_vertices(); + Architecture arc({{0, 1}, {1, 2}, {2, 3}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE(!mm.route_circuit( + circ, + { + std::make_shared(), + std::make_shared(), + }, + false)); + REQUIRE(orig_vertices - 8 == circ.n_gates()); + } +} + +SCENARIO("Test routing on a directed architecture with bidirectional edges") { + GIVEN("A simple two-qubit circuit") { + Circuit circ(2); + circ.add_op(OpType::H, {0}); + circ.add_op(OpType::CX, {0, 1}); + Architecture arc({{0, 1}, {1, 0}}); + Architecture arc2(std::vector>{{0, 1}}); + + // routing ignores the bidirectional edge and solves correctly + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + REQUIRE(circ.n_gates() == 2); + CHECK(respects_connectivity_constraints(circ, arc, false)); + } +} + +SCENARIO( + "Test routing on a 
directed architecture doesn't throw an error if " + "non-cx optype is presented") { + GIVEN( + "A simple two-qubit circuit with non-cx multi-qubit gates and a " + "directed architecture") { + Circuit circ(2); + circ.add_op(OpType::CU1, 0.5, {1, 0}); + circ.add_op(OpType::CU1, 0.5, {0, 1}); + circ.add_op(OpType::CY, {1, 0}); + circ.add_op(OpType::CY, {0, 1}); + circ.add_op(OpType::CZ, {1, 0}); + circ.add_op(OpType::CZ, {0, 1}); + circ.add_op(OpType::CRz, 0.5, {1, 0}); + circ.add_op(OpType::CRz, 0.5, {0, 1}); + + Architecture arc(std::vector>{{0, 1}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + REQUIRE(circ.n_gates() == 8); + } +} + +SCENARIO("Dense CX circuits route successfully") { + GIVEN( + "Complex CX circuits for large directed architecture based off " + "IBMTokyo") { + Circuit circ(17); + for (unsigned x = 0; x < 17; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { // swap the way directed chain runs each time + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + Architecture arc( + {{0, 1}, {1, 2}, {2, 3}, {3, 4}, {0, 5}, {1, 6}, {1, 7}, + {2, 6}, {2, 7}, {3, 8}, {3, 9}, {4, 8}, {4, 9}, {5, 6}, + {5, 10}, {5, 11}, {6, 10}, {6, 11}, {6, 7}, {7, 12}, {7, 13}, + {7, 8}, {8, 12}, {8, 13}, {8, 9}, {10, 11}, {11, 16}, {11, 17}, + {11, 12}, {12, 16}, {12, 17}, {12, 13}, {13, 18}, {13, 19}, {13, 14}, + {14, 18}, {14, 19}, {15, 16}, {16, 17}, {17, 18}, {18, 19}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + (Transforms::decompose_SWAP_to_CX() >> Transforms::decompose_BRIDGE_to_CX()) .apply(circ); + + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } +} + +SCENARIO( + "Dense CX circuits route successfully on undirected Ring with " + "placement.") { + GIVEN("Complex CX circuits, big ring") { + Circuit circ(29); + for (unsigned x = 0; x < 29; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + RingArch arc(29); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + } +} + +SCENARIO( + "Dense CX circuits route successfully on smart placement unfriendly " + "architecture.") { + GIVEN("Complex CX circuits, big ring") { + Circuit circ(13); + for (unsigned x = 0; x < 13; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + Architecture arc( + {{0, 1}, + {2, 0}, + {2, 4}, + {6, 4}, + {8, 6}, + {8, 10}, + {12, 10}, + {3, 1}, + {3, 5}, + {7, 5}, + {7, 9}, + {11, 9}, + {11, 13}, + {12, 13}, + {6, 7}}); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + } +} + +SCENARIO("Empty circuits, with and without blank wires") { + GIVEN("An empty circuit with some qubits") { + Circuit circ(6); + RingArch arc(6); + MappingManager mm(std::make_shared(arc)); + 
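+ // the circuit contains no gates, so routing is expected to report that no modification was made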
REQUIRE(!mm.route_circuit( + circ, + { + std::make_shared(), + std::make_shared(), + }, + false)); + REQUIRE(circ.depth() == 0); + REQUIRE(circ.n_gates() == 0); + REQUIRE(circ.n_qubits() == 6); + REQUIRE(!respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("An empty circuit with some qubits with labelling") { + Circuit circ(6); + RingArch arc(6); + MappingManager mm(std::make_shared(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + REQUIRE(circ.depth() == 0); + REQUIRE(circ.n_gates() == 0); + REQUIRE(circ.n_qubits() == 6); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("An empty circuit with no qubits") { + Circuit circ(0); + RingArch arc(6); + MappingManager mm(std::make_shared(arc)); + REQUIRE(!mm.route_circuit( + circ, {std::make_shared(), + std::make_shared()})); + REQUIRE(circ.depth() == 0); + REQUIRE(circ.n_gates() == 0); + REQUIRE(circ.n_qubits() == 0); + } +} + +SCENARIO("Initial map should contain all data qubits") { + GIVEN("An example circuit") { + Circuit circ(10); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CZ, {qubits[1], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[7], qubits[6]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[8], qubits[7]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[9], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[6], qubits[5]}); + SquareGrid sg(4, 4); + // Contains initial and final map + std::shared_ptr maps = std::make_shared(); + // Initialise the maps by the same way it's done with CompilationUnit + for (const UnitID& u : circ.all_units()) { + maps->initial.insert({u, u}); + maps->final.insert({u, u}); + } + + MappingManager mm(std::make_shared(sg)); + mm.route_circuit_with_maps( + circ, + {std::make_shared(), + std::make_shared()}, + maps); + for (auto q : qubits) { + REQUIRE(maps->initial.left.find(q) != maps->initial.left.end()); + REQUIRE(maps->final.left.find(q) != maps->final.left.end()); + } + + REQUIRE(check_permutation(circ, maps)); + } + GIVEN("An example circuit with remap") { + Circuit circ(10); + SquareGrid sg(4, 4); + std::vector nodes = sg.get_all_nodes_vec(); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::CZ, {qubits[1], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[7], qubits[6]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[8], qubits[7]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[9], qubits[4]}); + circ.add_op(OpType::CZ, {qubits[6], qubits[5]}); + + std::map rename_map; + + for (unsigned i = 0; i < 10; ++i) { + rename_map.insert({qubits[i], nodes[i]}); + } + + circ.rename_units(rename_map); + + Circuit initial_circ = Circuit(circ); + + // Contains initial and final map + std::shared_ptr maps = std::make_shared(); + // Initialise the maps by the same way it's done with CompilationUnit + for (const UnitID& u : circ.all_units()) { + maps->initial.insert({u, u}); + maps->final.insert({u, u}); + } + + MappingManager mm(std::make_shared(sg)); + mm.route_circuit_with_maps( + circ, + {std::make_shared(), + std::make_shared()}, + maps); + for (auto q : 
circ.all_qubits()) { + REQUIRE(maps->initial.left.find(q) != maps->initial.left.end()); + REQUIRE(maps->final.left.find(q) != maps->final.left.end()); + } + REQUIRE(check_permutation(circ, maps)); + } + GIVEN("An example circuit with remap II") { + Circuit circ(6); + SquareGrid sg(3, 3); + std::vector nodes = sg.get_all_nodes_vec(); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + + std::map rename_map; + + for (unsigned i = 0; i < 6; ++i) { + rename_map.insert({qubits[i], nodes[i]}); + } + + circ.rename_units(rename_map); + + Circuit initial_circ = Circuit(circ); + + // Contains initial and final map + std::shared_ptr maps = std::make_shared(); + // Initialise the maps by the same way it's done with CompilationUnit + for (const UnitID& u : circ.all_units()) { + maps->initial.insert({u, u}); + maps->final.insert({u, u}); + } + + MappingManager mm(std::make_shared(sg)); + mm.route_circuit_with_maps( + circ, + {std::make_shared(), + std::make_shared()}, + maps); + for (auto q : circ.all_qubits()) { + REQUIRE(maps->initial.left.find(q) != maps->initial.left.end()); + REQUIRE(maps->final.left.find(q) != maps->final.left.end()); + } + REQUIRE(check_permutation(circ, maps)); + + std::vector qubits_renamed = circ.all_qubits(); + + circ.add_op(OpType::SWAP, {qubits_renamed[1], qubits_renamed[4]}); + circ.add_op(OpType::SWAP, {qubits_renamed[3], qubits_renamed[4]}); + circ.add_op(OpType::SWAP, {qubits_renamed[1], qubits_renamed[2]}); + + REQUIRE(test_unitary_comparison(initial_circ, circ)); + } + GIVEN("An example circuit with remap III") { + Circuit circ(6); + SquareGrid sg(3, 3); + std::vector nodes = sg.get_all_nodes_vec(); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[5]}); + + std::map rename_map; + + for (unsigned i = 0; i < 6; ++i) { + rename_map.insert({qubits[i], nodes[i]}); + } + + circ.rename_units(rename_map); + + Circuit initial_circ = Circuit(circ); + + // Contains initial and final map + std::shared_ptr maps = std::make_shared(); + // Initialise the maps by the same way it's done with CompilationUnit + for (const UnitID& u : circ.all_units()) { + maps->initial.insert({u, u}); + maps->final.insert({u, u}); + } + + MappingManager mm(std::make_shared(sg)); + mm.route_circuit_with_maps( + circ, + {std::make_shared(), + std::make_shared()}, + maps); 
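+ // after routing, every circuit qubit should appear in both the initial and final maps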
+ for (auto q : circ.all_qubits()) { + REQUIRE(maps->initial.left.find(q) != maps->initial.left.end()); + REQUIRE(maps->final.left.find(q) != maps->final.left.end()); + } + REQUIRE(check_permutation(circ, maps)); + + std::vector qubits_renamed = circ.all_qubits(); + + circ.add_op(OpType::SWAP, {qubits_renamed[2], qubits_renamed[5]}); + circ.add_op(OpType::SWAP, {qubits_renamed[3], qubits_renamed[4]}); + circ.add_op(OpType::SWAP, {qubits_renamed[1], qubits_renamed[2]}); + + REQUIRE(test_unitary_comparison(initial_circ, circ)); + } + GIVEN("An example circuit with remap IV") { + Circuit circ(6); + SquareGrid sg(3, 3); + std::vector nodes = sg.get_all_nodes_vec(); + std::vector qubits = circ.all_qubits(); + + circ.add_op(OpType::H, {qubits[0]}); + circ.add_op(OpType::H, {qubits[1]}); + circ.add_op(OpType::H, {qubits[2]}); + circ.add_op(OpType::H, {qubits[3]}); + circ.add_op(OpType::H, {qubits[4]}); + circ.add_op(OpType::H, {qubits[5]}); + + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + + circ.add_op(OpType::Y, {qubits[0]}); + circ.add_op(OpType::Y, {qubits[1]}); + circ.add_op(OpType::Y, {qubits[2]}); + circ.add_op(OpType::Y, {qubits[3]}); + circ.add_op(OpType::Y, {qubits[4]}); + circ.add_op(OpType::Y, {qubits[5]}); + + circ.add_op(OpType::CZ, {qubits[4], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + + circ.add_op(OpType::Y, {qubits[0]}); + circ.add_op(OpType::Y, {qubits[1]}); + circ.add_op(OpType::Y, {qubits[2]}); + circ.add_op(OpType::Y, {qubits[3]}); + circ.add_op(OpType::Y, {qubits[4]}); + circ.add_op(OpType::Y, {qubits[5]}); + + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[0]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[5]}); + circ.add_op(OpType::CZ, {qubits[4], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[5]}); + + std::map rename_map; + + for (unsigned i = 0; i < 6; ++i) { + rename_map.insert({qubits[i], nodes[i]}); + } + + circ.rename_units(rename_map); + + Circuit initial_circ = Circuit(circ); + + // Contains initial and final map + std::shared_ptr maps = std::make_shared(); + // Initialise the maps by the same way it's done with CompilationUnit + for (const UnitID& u : circ.all_units()) { + maps->initial.insert({u, u}); + maps->final.insert({u, u}); + } + + MappingManager mm(std::make_shared(sg)); + mm.route_circuit_with_maps( + circ, + {std::make_shared(), + std::make_shared()}, + maps); + + for (auto q : circ.all_qubits()) { + REQUIRE(maps->initial.left.find(q) != maps->initial.left.end()); + REQUIRE(maps->final.left.find(q) != maps->final.left.end()); + } + REQUIRE(check_permutation(circ, maps)); + + std::vector qubits_renamed = circ.all_qubits(); + + // add swaps to resolve permutation + circ.add_op(OpType::SWAP, {qubits_renamed[1], qubits_renamed[2]}); + circ.add_op(OpType::SWAP, {qubits_renamed[4], qubits_renamed[5]}); + 
circ.add_op<UnitID>(OpType::SWAP, {qubits_renamed[1], qubits_renamed[4]});
+    circ.add_op<UnitID>(OpType::SWAP, {qubits_renamed[1], qubits_renamed[3]});
+    circ.add_op<UnitID>(OpType::SWAP, {qubits_renamed[2], qubits_renamed[5]});
+    circ.add_op<UnitID>(OpType::SWAP, {qubits_renamed[1], qubits_renamed[2]});
+    circ.add_op<UnitID>(OpType::SWAP, {qubits_renamed[3], qubits_renamed[4]});
+
+    REQUIRE(test_unitary_comparison(initial_circ, circ));
+  }
+}
+
+SCENARIO("Lexi relabel with partially mapped circuit") {
+  GIVEN("With an unplaced qubit") {
+    Architecture arc({{0, 1}, {1, 2}});
+    Circuit c(3);
+    c.add_op(OpType::CZ, {0, 1}, "cz0,1");
+    c.add_op(OpType::CZ, {1, 2}, "cz1,2");
+    std::shared_ptr<unit_bimaps_t> maps = std::make_shared<unit_bimaps_t>();
+    // Initialise the maps by the same way it's done with CompilationUnit
+    for (const UnitID& u : c.all_units()) {
+      maps->initial.insert({u, u});
+      maps->final.insert({u, u});
+    }
+    Placement pl(arc);
+    qubit_mapping_t partial_map;
+    partial_map.insert({Qubit(0), Node(0)});
+    partial_map.insert({Qubit(1), Node(1)});
+    pl.place_with_map(c, partial_map, maps);
+
+    MappingManager mm(std::make_shared<Architecture>(arc));
+    mm.route_circuit_with_maps(
+        c,
+        {std::make_shared<LexiLabellingMethod>(),
+         std::make_shared<LexiRouteRoutingMethod>()},
+        maps);
+    REQUIRE(check_permutation(c, maps));
+  }
+  GIVEN("With an unplaced qubit merged to an ancilla") {
+    Circuit c(4);
+    c.add_op(OpType::CZ, {3, 0}, "cz3,0");
+    c.add_op(OpType::CZ, {1, 0}, "cz1,0");
+    c.add_op(OpType::CZ, {1, 3}, "cz1,3");
+    c.add_op(OpType::CZ, {3, 2}, "cz3,2");
+
+    Architecture arc({{0, 1}, {0, 2}, {0, 3}, {4, 1}, {4, 2}});
+    PassPtr plac_p = gen_placement_pass(std::make_shared<GraphPlacement>(arc));
+    CompilationUnit cu(c);
+    REQUIRE(plac_p->apply(cu));
+    const unit_bimap_t& initial_map = cu.get_initial_map_ref();
+    const unit_bimap_t& final_map = cu.get_final_map_ref();
+
+    PassPtr r_p = gen_routing_pass(
+        arc, {std::make_shared<LexiLabellingMethod>(),
+              std::make_shared<LexiRouteRoutingMethod>()});
+    REQUIRE(r_p->apply(cu));
+
+    for (const Qubit& q : c.all_qubits()) {
+      REQUIRE(initial_map.left.find(q) != initial_map.left.end());
+      REQUIRE(final_map.left.find(q) != final_map.left.end());
+    }
+    for (const Qubit& q : cu.get_circ_ref().all_qubits()) {
+      REQUIRE(initial_map.right.find(q) != initial_map.right.end());
+      REQUIRE(final_map.right.find(q) != final_map.right.end());
+    }
+  }
+}
+
+}  // namespace tket
\ No newline at end of file
diff --git a/tket/tests/test_LexicographicalComparison.cpp b/tket/tests/test_LexicographicalComparison.cpp
new file mode 100644
index 0000000000..53f26a5973
--- /dev/null
+++ b/tket/tests/test_LexicographicalComparison.cpp
@@ -0,0 +1,225 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include
+#include
+#include
+#include
+
+#include "Mapping/LexicographicalComparison.hpp"
+
+namespace tket {
+
+SCENARIO("Test LexicographicalComparison::LexicographicalComparison") {
+  GIVEN("Five Node Architecture, interacting nodes all in architecture.") {
+    std::vector<Node> nodes = {
+        Node("test_node", 0), Node("test_node", 1), Node("test_node", 2),
+        Node("test_node", 3), Node("test_node", 4)};
+    // n0 -- n1 -- n2
+    //       |
+    //       n3
+    //       |
+    //       n4
+    Architecture architecture(
+        {{nodes[0], nodes[1]},
+         {nodes[1], nodes[2]},
+         {nodes[1], nodes[3]},
+         {nodes[3], nodes[4]}});
+    interacting_nodes_t interacting_nodes = {
+        {nodes[0], nodes[3]},
+        {nodes[3], nodes[0]},
+        {nodes[2], nodes[4]},
+        {nodes[4], nodes[2]}};
+
+    ArchitecturePtr sc = std::make_shared<Architecture>(architecture);
+
+    LexicographicalComparison lc_test(sc, interacting_nodes);
+
+    lexicographical_distances_t distances =
+        lc_test.get_lexicographical_distances();
+    REQUIRE(distances.size() == 3);
+    REQUIRE(distances[0] == 2);
+    REQUIRE(distances[1] == 2);
+    REQUIRE(distances[2] == 0);
+  }
+  GIVEN("Three Node architecture, some interacting node not in architecture.") {
+    std::vector<Node> nodes = {
+        Node("test_node", 0), Node("test_node", 1), Node("test_node", 2)};
+    Architecture architecture({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}});
+    ArchitecturePtr sa = std::make_shared<Architecture>(architecture);
+    interacting_nodes_t interacting_nodes = {
+        {nodes[0], Node("bad_node", 4)}, {Node("test_node", 3), nodes[0]}};
+    REQUIRE_THROWS_AS(
+        LexicographicalComparison(sa, interacting_nodes),
+        LexicographicalComparisonError);
+  }
+}
+
+SCENARIO("Test LexicographicalComparison::increment_distances") {
+  GIVEN("Three Node Architecture, varying standard increments.") {
+    std::vector<Node> nodes = {Node(0), Node(1), Node(2)};
+    Architecture architecture({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}});
+    interacting_nodes_t interactions = {
+        {nodes[0], nodes[2]}, {nodes[2], nodes[0]}};
+    ArchitecturePtr sa = std::make_shared<Architecture>(architecture);
+    LexicographicalComparison lc_test(sa, interactions);
+
+    lexicographical_distances_t distances =
+        lc_test.get_lexicographical_distances();
+    REQUIRE(distances[0] == 2);
+    REQUIRE(distances[1] == 0);
+
+    std::pair<Node, Node> interaction = {nodes[0], nodes[2]};
+    lc_test.increment_distances(distances, interaction, -2);
+    REQUIRE(distances[0] == 0);
+    REQUIRE(distances[1] == 0);
+
+    REQUIRE_THROWS_AS(
+        lc_test.increment_distances(distances, interaction, -2),
+        LexicographicalComparisonError);
+
+    interaction = {nodes[1], nodes[0]};
+    lc_test.increment_distances(distances, interaction, 2);
+    REQUIRE(distances[0] == 0);
+    REQUIRE(distances[1] == 2);
+  }
+}
+
+SCENARIO(
+    "Test LexicographicalComparison::get_updated_distances, five node "
+    "architecture") {
+  std::vector<Node> nodes = {
+      Node("test_node", 0), Node("test_node", 1), Node("test_node", 2),
+      Node("test_node", 3), Node("test_node", 4)};
+  // n0 -- n1 -- n2
+  //       |
+  //       n3
+  //       |
+  //       n4
+  Architecture architecture(
+      {{nodes[0], nodes[1]},
+       {nodes[1], nodes[2]},
+       {nodes[1], nodes[3]},
+       {nodes[3], nodes[4]}});
+  ArchitecturePtr shared_arc = std::make_shared<Architecture>(architecture);
+  interacting_nodes_t interacting_nodes = {
+      {nodes[0], nodes[3]},
+      {nodes[3], nodes[0]},
+      {nodes[2], nodes[4]},
+      {nodes[4], nodes[2]}};
+
+  LexicographicalComparison lc_test(shared_arc, interacting_nodes);
+  GIVEN("Two identical legal swap, one node in interaction.") {
+    swap_t swap_12 = {nodes[1], nodes[2]};
+    swap_t swap_21 = {nodes[1], nodes[2]};
+    lexicographical_distances_t distances_12 =
+        lc_test.get_updated_distances(swap_12);
+    REQUIRE(distances_12.size() == 3);
+    REQUIRE(distances_12[0] == 0);
+    REQUIRE(distances_12[1] == 4);
+    REQUIRE(distances_12[2] == 0);
+    REQUIRE(distances_12 == lc_test.get_updated_distances(swap_21));
+  }
+  GIVEN("Two identical legal swap, both node in interaction.") {
+    swap_t swap_34 = {nodes[3], nodes[4]};
+    swap_t swap_43 = {nodes[4], nodes[3]};
+    lexicographical_distances_t distances_34 =
+        lc_test.get_updated_distances(swap_34);
+    REQUIRE(distances_34.size() == 3);
+    REQUIRE(distances_34[0] == 2);
+    REQUIRE(distances_34[1] == 2);
+    REQUIRE(distances_34[2] == 0);
+    REQUIRE(distances_34 == lc_test.get_updated_distances(swap_43));
+  }
+  GIVEN("Illegal swap.") {
+    // illegal swap -> as Node not in architecture will return unchanged
+    swap_t swap_illegal = {Node("bad_node", 0), Node("bad_node", 9)};
+    lexicographical_distances_t distances_illegal =
+        lc_test.get_updated_distances(swap_illegal);
+    REQUIRE(distances_illegal == lc_test.get_lexicographical_distances());
+  }
+  GIVEN("Swap between two qubits in already adjacent interaction.") {
+    interacting_nodes_t interacting = {
+        {nodes[0], nodes[1]}, {nodes[3], nodes[4]}};
+    LexicographicalComparison lc_in(shared_arc, interacting);
+    swap_t swap_01 = {nodes[0], nodes[1]};
+    swap_t swap_10 = {nodes[1], nodes[0]};
+    swap_t swap_34 = {nodes[3], nodes[4]};
+    swap_t swap_43 = {nodes[4], nodes[3]};
+    lexicographical_distances_t distances_01 =
+        lc_in.get_updated_distances(swap_01);
+    lexicographical_distances_t distances_10 =
+        lc_in.get_updated_distances(swap_10);
+    lexicographical_distances_t distances_34 =
+        lc_in.get_updated_distances(swap_34);
+    lexicographical_distances_t distances_43 =
+        lc_in.get_updated_distances(swap_43);
+    lexicographical_distances_t base_distances =
+        lc_in.get_lexicographical_distances();
+    lexicographical_distances_t comp = {0, 0, 4};
+    REQUIRE(base_distances == comp);
+    REQUIRE(distances_01 == base_distances);
+    REQUIRE(distances_10 == base_distances);
+    REQUIRE(distances_34 == base_distances);
+    REQUIRE(distances_43 == base_distances);
+  }
+}
+
+SCENARIO("Test LexicographicalComparison::remove_swaps_lexicographical") {
+  std::vector<Node> nodes = {
+      Node("test_node", 0), Node("test_node", 1), Node("test_node", 2),
+      Node("test_node", 3), Node("test_node", 4)};
+  // n0 -- n1 -- n2
+  //       |
+  //       n3
+  //       |
+  //       n4
+  Architecture architecture(
+      {{nodes[0], nodes[1]},
+       {nodes[1], nodes[2]},
+       {nodes[1], nodes[3]},
+       {nodes[3], nodes[4]}});
+  ArchitecturePtr shared_arc = std::make_shared<Architecture>(architecture);
+  interacting_nodes_t interacting_nodes = {
+      {nodes[0], nodes[3]},
+      {nodes[3], nodes[0]},
+      {nodes[2], nodes[4]},
+      {nodes[4], nodes[2]}};
+
+  LexicographicalComparison lc_test(shared_arc, interacting_nodes);
+  GIVEN("Single Swap.") {
+    swap_t swap_01 = {nodes[0], nodes[1]};
+    swap_set_t candidate_swaps = {swap_01};
+    lc_test.remove_swaps_lexicographical(candidate_swaps);
+    REQUIRE(candidate_swaps.size() == 1);
+    REQUIRE(*candidate_swaps.begin() == swap_01);
+  }
+  GIVEN("Two Swap, both identical.") {
+    swap_t swap_01 = {nodes[0], nodes[1]};
+    swap_t swap_10 = {nodes[1], nodes[0]};
+    swap_set_t candidate_swaps = {swap_01, swap_10};
+    lc_test.remove_swaps_lexicographical(candidate_swaps);
+    REQUIRE(candidate_swaps.size() == 2);
+  }
+  GIVEN("Swap on all edges.") {
+    swap_t swap_01 = {nodes[0], nodes[1]};
+    swap_t swap_12 = {nodes[1], nodes[2]};
+    swap_t swap_13 = {nodes[1], nodes[3]};
+    swap_t swap_34 = {nodes[3], nodes[4]};
+    swap_set_t candidate_swaps = {swap_01, swap_12, swap_13,
swap_34}; + lc_test.remove_swaps_lexicographical(candidate_swaps); + REQUIRE(candidate_swaps.size() == 1); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingFrontier.cpp b/tket/tests/test_MappingFrontier.cpp new file mode 100644 index 0000000000..6bc4ae1c11 --- /dev/null +++ b/tket/tests/test_MappingFrontier.cpp @@ -0,0 +1,959 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "Circuit/ClassicalExpBox.hpp" +#include "Mapping/MappingManager.hpp" + +namespace tket { + +SCENARIO("Test MappingFrontier initialisation, advance_frontier_boundary.") { + GIVEN("A typical Circuit and Architecture with uninitialised boundary") { + Circuit circ; + circ.add_q_register("test_nodes", 4); + + std::vector qubits = circ.all_qubits(); + + Vertex v1 = circ.add_op(OpType::X, {qubits[0]}); + Vertex v8 = circ.add_op(OpType::S, {qubits[3]}); + Vertex v9 = circ.add_op(OpType::T, {qubits[3]}); + Vertex v2 = circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + Vertex v3 = circ.add_op(OpType::CY, {qubits[2], qubits[3]}); + Vertex v4 = circ.add_op(OpType::H, {qubits[0]}); + Vertex v5 = circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + Vertex v6 = circ.add_op(OpType::Y, {qubits[0]}); + Vertex v7 = circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier m(circ); + MappingFrontier mf(m); + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0 = mf.linear_boundary->get().find(nodes[0])->second; + VertPort vp1 = mf.linear_boundary->get().find(nodes[1])->second; + VertPort vp2 = mf.linear_boundary->get().find(nodes[2])->second; + VertPort vp3 = mf.linear_boundary->get().find(nodes[3])->second; + + Edge e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + Edge e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + Edge e3 = mf.circuit_.get_nth_out_edge(vp3.first, vp3.second); + + REQUIRE(mf.circuit_.source(e0) == v4); + REQUIRE(mf.circuit_.target(e0) == v5); + REQUIRE(mf.circuit_.source(e1) == v2); + REQUIRE(mf.circuit_.target(e1) == v7); + REQUIRE( + mf.circuit_.get_OpType_from_Vertex(mf.circuit_.source(e2)) == + OpType::Input); + REQUIRE(mf.circuit_.target(e2) == v3); + REQUIRE(mf.circuit_.source(e3) == v9); + REQUIRE(mf.circuit_.target(e3) == v3); + + mf.advance_frontier_boundary(shared_arc); + // check that advance_frontier_boundary doesn't incorrectly move boundary + // forwards + vp0 = mf.linear_boundary->get().find(nodes[0])->second; + vp1 = mf.linear_boundary->get().find(nodes[1])->second; + vp2 = 
mf.linear_boundary->get().find(nodes[2])->second; + vp3 = mf.linear_boundary->get().find(nodes[3])->second; + + e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + e3 = mf.circuit_.get_nth_out_edge(vp3.first, vp3.second); + + REQUIRE(mf.circuit_.source(e0) == v4); + REQUIRE(mf.circuit_.target(e0) == v5); + REQUIRE(mf.circuit_.source(e1) == v2); + REQUIRE(mf.circuit_.target(e1) == v7); + REQUIRE( + mf.circuit_.get_OpType_from_Vertex(mf.circuit_.source(e2)) == + OpType::Input); + REQUIRE(mf.circuit_.target(e2) == v3); + REQUIRE(mf.circuit_.source(e3) == v9); + REQUIRE(mf.circuit_.target(e3) == v3); + } + + GIVEN("A circuit with measurements and classically controlled operations") { + Circuit circ(3, 1); + std::vector qubits = circ.all_qubits(); + // All gates are physically permitted + Vertex v0 = circ.add_op(OpType::Measure, {0, 0}); + Vertex v1 = + circ.add_conditional_gate(OpType::Rx, {0.6}, {0}, {0}, 1); + Vertex v2 = + circ.add_conditional_gate(OpType::Rz, {0.6}, {1}, {0}, 1); + Vertex v3 = circ.add_op(OpType::X, {2}); + std::vector nodes = {Node(0), Node(1), Node(2)}; + + Architecture arc({{nodes[0], nodes[1]}, {nodes[1], nodes[2]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + mf.advance_frontier_boundary(shared_arc); + VertPort vp0 = mf.linear_boundary->get().find(nodes[0])->second; + VertPort vp1 = mf.linear_boundary->get().find(nodes[1])->second; + VertPort vp2 = mf.linear_boundary->get().find(nodes[2])->second; + Op_ptr op = circ.get_Op_ptr_from_Vertex(vp0.first); + Op_ptr op2 = circ.get_Op_ptr_from_Vertex(vp1.first); + Op_ptr op3 = circ.get_Op_ptr_from_Vertex(vp2.first); + REQUIRE(vp0.first == v1); + REQUIRE(vp1.first == v2); + REQUIRE(vp2.first == v3); + } + GIVEN( + "A circuit with multi edge bundles of booleans, conditional gates with " + "multiple inputs, conditional 2-qubit gates.") { + Circuit circ(4, 4); + + Vertex v0 = + circ.add_conditional_gate(OpType::X, {}, {0}, {0, 1}, 1); + Vertex v1 = circ.add_conditional_gate(OpType::Y, {}, {1}, {1}, 0); + Vertex v2 = circ.add_op(OpType::CX, {1, 2}); + Vertex v3 = circ.add_measure(2, 2); + Vertex v4 = circ.add_op(OpType::CX, {3, 2}); + Vertex v5 = circ.add_measure(3, 3); + Vertex v6 = + circ.add_conditional_gate(OpType::Z, {}, {3}, {1, 2}, 0); + Vertex v7 = circ.add_measure(3, 3); + Vertex v8 = circ.add_barrier( + {Qubit(0), Qubit(1), Qubit(2), Qubit(3), Bit(1), Bit(2), Bit(3)}); + Vertex v9 = + circ.add_conditional_gate(OpType::Z, {}, {3}, {1, 2}, 0); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + std::vector qubits = circ.all_qubits(); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + + circ.rename_units(rename_map); + std::vector bits = circ.all_bits(); + MappingFrontier mf(circ); + + REQUIRE( + mf.boolean_boundary->get().find(bits[0]) != + mf.boolean_boundary->get().end()); + REQUIRE( + mf.boolean_boundary->get().find(bits[1]) != + mf.boolean_boundary->get().end()); + REQUIRE( + mf.boolean_boundary->get().find(bits[2]) == + mf.boolean_boundary->get().end()); + REQUIRE( + 
mf.boolean_boundary->get().find(bits[3]) == + mf.boolean_boundary->get().end()); + + mf.advance_frontier_boundary(shared_arc); + + VertPort vp_q_0 = mf.linear_boundary->get().find(nodes[0])->second; + VertPort vp_q_1 = mf.linear_boundary->get().find(nodes[1])->second; + VertPort vp_q_2 = mf.linear_boundary->get().find(nodes[2])->second; + VertPort vp_q_3 = mf.linear_boundary->get().find(nodes[3])->second; + // note c[0] and c[1] not linear_boundary as they are immediately boolean + VertPort vp_b_2 = mf.linear_boundary->get().find(bits[2])->second; + VertPort vp_b_3 = mf.linear_boundary->get().find(bits[3])->second; + + REQUIRE( + circ.get_OpType_from_Vertex(circ.target(circ.get_nth_out_edge( + vp_q_0.first, vp_q_0.second))) == OpType::Output); + REQUIRE( + circ.get_OpType_from_Vertex(circ.target(circ.get_nth_out_edge( + vp_q_1.first, vp_q_1.second))) == OpType::Output); + REQUIRE( + circ.get_OpType_from_Vertex(circ.target(circ.get_nth_out_edge( + vp_q_2.first, vp_q_2.second))) == OpType::Output); + REQUIRE( + circ.get_OpType_from_Vertex(circ.target(circ.get_nth_out_edge( + vp_q_3.first, vp_q_3.second))) == OpType::Output); + REQUIRE( + circ.get_OpType_from_Vertex(circ.target(circ.get_nth_out_edge( + vp_b_2.first, vp_b_2.second))) == OpType::ClOutput); + REQUIRE( + circ.get_OpType_from_Vertex(circ.target(circ.get_nth_out_edge( + vp_b_3.first, vp_b_3.second))) == OpType::ClOutput); + + // in and then removed from boolean boundary + REQUIRE( + mf.boolean_boundary->get().find(bits[2]) == + mf.boolean_boundary->get().end()); + // not in boolean boundary because bool not used in condition + REQUIRE( + mf.boolean_boundary->get().find(bits[3]) == + mf.boolean_boundary->get().end()); + } +} + +SCENARIO("Test MappingFrontier get_default_to_linear_boundary_unit_map") { + Circuit circ; + circ.add_q_register("test_nodes", 4); + std::vector qubits = circ.all_qubits(); + MappingFrontier mf(circ); + unit_map_t d_2_q = mf.get_default_to_linear_boundary_unit_map(); + REQUIRE(d_2_q[Qubit(0)] == qubits[0]); + REQUIRE(d_2_q[Qubit(1)] == qubits[1]); + REQUIRE(d_2_q[Qubit(2)] == qubits[2]); + REQUIRE(d_2_q[Qubit(3)] == qubits[3]); +} + +SCENARIO("Test MappingFrontier get_frontier_subcircuit.") { + GIVEN( + "A typical circuit, MappingFrontier with depth 1 and depth 3 " + "subcircuit returns, no renaming units.") { + Circuit circ; + circ.add_q_register("test_nodes", 4); + std::vector qubits = circ.all_qubits(); + + Vertex v1 = circ.add_op(OpType::X, {qubits[0]}); + Vertex v8 = circ.add_op(OpType::S, {qubits[3]}); + Vertex v9 = circ.add_op(OpType::T, {qubits[3]}); + Vertex v2 = circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + Vertex v3 = circ.add_op(OpType::CY, {qubits[2], qubits[3]}); + Vertex v4 = circ.add_op(OpType::H, {qubits[0]}); + Vertex v5 = circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + Vertex v6 = circ.add_op(OpType::Y, {qubits[0]}); + Vertex v7 = circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf_1(circ); + MappingFrontier mf_3(circ); + + mf_1.advance_frontier_boundary(shared_arc); + Subcircuit sc1 = mf_1.get_frontier_subcircuit(1, 7); + mf_3.advance_frontier_boundary(shared_arc); + Subcircuit sc3 = 
mf_3.get_frontier_subcircuit(3, 7); + + Circuit frontier_circuit_1 = mf_1.circuit_.subcircuit(sc1); + + Circuit comparison_circuit(4); + comparison_circuit.add_op(OpType::CY, {2, 3}); + REQUIRE(frontier_circuit_1 == comparison_circuit); + + Circuit frontier_circuit_3 = mf_3.circuit_.subcircuit(sc3); + comparison_circuit.add_op(OpType::CZ, {0, 2}); + comparison_circuit.add_op(OpType::Y, {0}); + comparison_circuit.add_op(OpType::CX, {3, 1}); + REQUIRE(frontier_circuit_3 == comparison_circuit); + } + + GIVEN( + "A typical circuit but with non-contiguous Qubit Labelling. " + "MappingFrontier with depth 1 and depth 3 " + "subcircuit returns, no renaming units.") { + Circuit circ(4); + Qubit q0("label_0", 1); + Qubit q1("label_1", 3); + Qubit q2("label_2", 0); + Qubit q3("label_3", 2); + std::vector qubits = {q0, q1, q2, q3}; + std::map new_units = { + {Qubit(0), q0}, {Qubit(1), q1}, {Qubit(2), q2}, {Qubit(3), q3}}; + circ.rename_units(new_units); + + Vertex v1 = circ.add_op(OpType::X, {qubits[0]}); + Vertex v8 = circ.add_op(OpType::S, {qubits[3]}); + Vertex v9 = circ.add_op(OpType::T, {qubits[3]}); + Vertex v2 = circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + Vertex v3 = circ.add_op(OpType::CY, {qubits[2], qubits[3]}); + Vertex v4 = circ.add_op(OpType::H, {qubits[0]}); + Vertex v5 = circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + Vertex v6 = circ.add_op(OpType::Y, {qubits[0]}); + Vertex v7 = circ.add_op(OpType::CX, {qubits[3], qubits[1]}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + + circ.rename_units(rename_map); + + MappingFrontier mf_1(circ); + MappingFrontier mf_3(circ); + + mf_1.advance_frontier_boundary(shared_arc); + Subcircuit sc1 = mf_1.get_frontier_subcircuit(1, 7); + mf_3.advance_frontier_boundary(shared_arc); + Subcircuit sc3 = mf_3.get_frontier_subcircuit(3, 7); + + Circuit frontier_circuit_1 = mf_1.circuit_.subcircuit(sc1); + + frontier_circuit_1.rename_units( + mf_1.get_default_to_linear_boundary_unit_map()); + Circuit comparison_circuit(4); + std::map rename_map_default = { + {Qubit(0), nodes[0]}, + {Qubit(1), nodes[1]}, + {Qubit(2), nodes[2]}, + {Qubit(3), nodes[3]}}; + comparison_circuit.rename_units(rename_map_default); + comparison_circuit.add_op(OpType::CY, {nodes[2], nodes[3]}); + REQUIRE(frontier_circuit_1 == comparison_circuit); + Circuit frontier_circuit_3 = mf_3.circuit_.subcircuit(sc3); + frontier_circuit_3.rename_units( + mf_3.get_default_to_linear_boundary_unit_map()); + + comparison_circuit.add_op(OpType::CZ, {nodes[0], nodes[2]}); + comparison_circuit.add_op(OpType::Y, {nodes[0]}); + comparison_circuit.add_op(OpType::CX, {nodes[3], nodes[1]}); + REQUIRE(frontier_circuit_3 == comparison_circuit); + } +} + +SCENARIO("Test update_linear_boundary_uids.") { + Circuit circ(10); + std::vector qbs = circ.all_qubits(); + MappingFrontier mf(circ); + GIVEN("Empty relabelling.") { mf.update_linear_boundary_uids({}); } + GIVEN("Relabel some qubits to same qubit.") { + mf.update_linear_boundary_uids( + {{qbs[0], qbs[0]}, {qbs[2], qbs[2]}, {qbs[7], qbs[7]}}); + REQUIRE(mf.linear_boundary->get().find(qbs[0])->first == qbs[0]); + REQUIRE(mf.linear_boundary->get().find(qbs[2])->first == qbs[2]); + REQUIRE(mf.linear_boundary->get().find(qbs[7])->first == qbs[7]); + } + GIVEN("Relabel to already present qubit, check 
boundary has qubit removed.") { + mf.update_linear_boundary_uids({{qbs[0], qbs[1]}}); + REQUIRE(mf.linear_boundary->get().size() == 9); + } + GIVEN("Relabel to new UnitID.") { + mf.update_linear_boundary_uids({{qbs[0], Node("tn", 6)}}); + REQUIRE( + mf.linear_boundary->get().find(qbs[0]) == + mf.linear_boundary->get().end()); + } +} + +SCENARIO("Test permute_subcircuit_q_out_hole.") { + GIVEN("Quantum Boundary and Permutation have size mismatch.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + circ.add_op(OpType::X, {q0}); + circ.add_op(OpType::CX, {q0, q1}); + circ.add_op(OpType::CY, {q2, q3}); + circ.add_op(OpType::CZ, {q0, q2}); + circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + unit_map_t permutation = {{nodes[0], nodes[1]}}; + + REQUIRE_THROWS_AS( + mf.permute_subcircuit_q_out_hole(permutation, sc), + MappingFrontierError); + } + GIVEN( + "Quantum Boundary and permutation have same size, but UnitID don't " + "match.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + circ.add_op(OpType::X, {q0}); + circ.add_op(OpType::CX, {q0, q1}); + circ.add_op(OpType::CY, {q2, q3}); + circ.add_op(OpType::CZ, {q0, q2}); + circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + unit_map_t permutation = { + {nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {Node(4), nodes[0]}}; + + REQUIRE_THROWS_AS( + mf.permute_subcircuit_q_out_hole(permutation, sc), + MappingFrontierError); + } + GIVEN("A four qubit subcircuit where every qubit is permuted by given map.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + circ.add_op(OpType::X, {q0}); + circ.add_op(OpType::CX, {q0, q1}); + circ.add_op(OpType::CY, {q2, q3}); + circ.add_op(OpType::CZ, {q0, q2}); + circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + // assume only 1 subcircuit + EdgeVec original_q_out = sc.q_out_hole; + 
+ unit_map_t permutation = { + {nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[0]}}; + mf.permute_subcircuit_q_out_hole(permutation, sc); + + EdgeVec permuted_q_out = sc.q_out_hole; + + REQUIRE(original_q_out[1] == permuted_q_out[0]); + REQUIRE(original_q_out[2] == permuted_q_out[1]); + REQUIRE(original_q_out[3] == permuted_q_out[2]); + REQUIRE(original_q_out[0] == permuted_q_out[3]); + } + GIVEN("A four qubit subcircuit with a partial permutation.") { + Circuit circ(0); + circ.add_q_register("test_nodes", 4); + Qubit q0("test_nodes", 0); + Qubit q1("test_nodes", 1); + Qubit q2("test_nodes", 2); + Qubit q3("test_nodes", 3); + + Vertex v1 = circ.add_op(OpType::X, {q0}); + Vertex v2 = circ.add_op(OpType::CX, {q0, q1}); + Vertex v3 = circ.add_op(OpType::CY, {q2, q3}); + Vertex v5 = circ.add_op(OpType::CZ, {q0, q2}); + Vertex v7 = circ.add_op(OpType::CX, {q3, q1}); + + std::vector nodes = {Node(0), Node(1), Node(2), Node(3)}; + + Architecture arc( + {{nodes[0], nodes[1]}, {nodes[1], nodes[3]}, {nodes[2], nodes[1]}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + + std::map rename_map = { + {q0, nodes[0]}, {q1, nodes[1]}, {q2, nodes[2]}, {q3, nodes[3]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + + mf.advance_frontier_boundary(shared_arc); + Subcircuit sc = mf.get_frontier_subcircuit(2, 5); + // assume only 1 subcircuit + EdgeVec original_q_out = sc.q_out_hole; + + unit_map_t permutation = { + {nodes[0], nodes[1]}, + {nodes[1], nodes[0]}, + {nodes[2], nodes[2]}, + {nodes[3], nodes[3]}}; + mf.permute_subcircuit_q_out_hole(permutation, sc); + + EdgeVec permuted_q_out = sc.q_out_hole; + + REQUIRE(original_q_out[1] == permuted_q_out[0]); + REQUIRE(original_q_out[0] == permuted_q_out[1]); + REQUIRE(original_q_out[2] == permuted_q_out[2]); + REQUIRE(original_q_out[3] == permuted_q_out[3]); + } +} +SCENARIO("Test MappingFrontier::advance_next_2qb_slice") { + std::vector nodes = {Node("test_node", 0), Node("test_node", 1), + Node("test_node", 2), Node("node_test", 3), + Node("node_test", 4), Node("node_test", 5), + Node("test_node", 6), Node("node_test", 7)}; + // n0 -- n1 -- n2 -- n3 -- n4 + // | | + // n5 n7 + // | + // n6 + Architecture architecture( + {{nodes[0], nodes[1]}, + {nodes[1], nodes[2]}, + {nodes[2], nodes[3]}, + {nodes[3], nodes[4]}, + {nodes[2], nodes[5]}, + {nodes[5], nodes[6]}, + {nodes[3], nodes[7]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("One CX to find in next slice.") { + Circuit circ(8); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::X, {qubits[7]}); + circ.add_op(OpType::CX, {qubits[2], qubits[7]}); + // n7 + // | + // n0 -- n1 -- n2 -- n3 -- n4 + // | + // n5 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + // gets to first two cx + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0 = mf.linear_boundary->get().find(nodes[0])->second; + VertPort vp4 = mf.linear_boundary->get().find(nodes[4])->second; + VertPort vp6 = mf.linear_boundary->get().find(nodes[6])->second; + VertPort vp7 = mf.linear_boundary->get().find(nodes[7])->second; + + Edge e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e4 
= mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + Edge e6 = mf.circuit_.get_nth_out_edge(vp6.first, vp6.second); + Edge e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + Vertex v0 = mf.circuit_.target(e0); + Vertex v4 = mf.circuit_.target(e4); + Vertex v6 = mf.circuit_.target(e6); + Vertex v7 = mf.circuit_.target(e7); + + REQUIRE(v0 == v4); + REQUIRE(v6 == v7); + + mf.advance_next_2qb_slice(5); + VertPort vp2 = mf.linear_boundary->get().find(nodes[2])->second; + vp7 = mf.linear_boundary->get().find(nodes[7])->second; + + Edge e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + Vertex v2 = mf.circuit_.target(e2); + v7 = mf.circuit_.target(e7); + + REQUIRE(v2 == v7); + } + GIVEN( + "Three CX to find in next slice 1, Two CX and one CZ in next slice 2. ") { + Circuit circ(8); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[4]}); + circ.add_op(OpType::CX, {qubits[6], qubits[7]}); + circ.add_op(OpType::CX, {qubits[2], qubits[7]}); + circ.add_op(OpType::CX, {qubits[0], qubits[5]}); + circ.add_op(OpType::X, {qubits[0]}); + circ.add_op(OpType::CX, {qubits[4], qubits[1]}); + circ.add_op(OpType::CX, {qubits[2], qubits[0]}); + circ.add_op(OpType::X, {qubits[1]}); + circ.add_op(OpType::CX, {qubits[4], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[7]}); + // n7 + // | + // n0 -- n1 -- n2 -- n3 -- n4 + // | + // n5 + // | + // n6 + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}, {qubits[4], nodes[4]}, {qubits[5], nodes[5]}, + {qubits[6], nodes[6]}, {qubits[7], nodes[7]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + // gets to first two cx + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0 = mf.linear_boundary->get().find(nodes[0])->second; + VertPort vp4 = mf.linear_boundary->get().find(nodes[4])->second; + VertPort vp6 = mf.linear_boundary->get().find(nodes[6])->second; + VertPort vp7 = mf.linear_boundary->get().find(nodes[7])->second; + + Edge e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e4 = mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + Edge e6 = mf.circuit_.get_nth_out_edge(vp6.first, vp6.second); + Edge e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + Vertex v0 = mf.circuit_.target(e0); + Vertex v4 = mf.circuit_.target(e4); + Vertex v6 = mf.circuit_.target(e6); + Vertex v7 = mf.circuit_.target(e7); + + // get edges + // then get target... 
+ REQUIRE(v0 == v4); + REQUIRE(v6 == v7); + + mf.advance_next_2qb_slice(1); + vp0 = mf.linear_boundary->get().find(nodes[0])->second; + VertPort vp1 = mf.linear_boundary->get().find(nodes[1])->second; + VertPort vp2 = mf.linear_boundary->get().find(nodes[2])->second; + vp4 = mf.linear_boundary->get().find(nodes[4])->second; + VertPort vp5 = mf.linear_boundary->get().find(nodes[5])->second; + vp7 = mf.linear_boundary->get().find(nodes[7])->second; + + e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + Edge e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + Edge e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + e4 = mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + Edge e5 = mf.circuit_.get_nth_out_edge(vp5.first, vp5.second); + e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + v0 = mf.circuit_.target(e0); + Vertex v1 = mf.circuit_.target(e1); + Vertex v2 = mf.circuit_.target(e2); + v4 = mf.circuit_.target(e4); + Vertex v5 = mf.circuit_.target(e5); + v7 = mf.circuit_.target(e7); + + REQUIRE(v1 == v4); + REQUIRE(v0 == v5); + REQUIRE(v2 == v7); + + mf.advance_next_2qb_slice(1); + vp0 = mf.linear_boundary->get().find(nodes[0])->second; + vp1 = mf.linear_boundary->get().find(nodes[1])->second; + vp2 = mf.linear_boundary->get().find(nodes[2])->second; + VertPort vp3 = mf.linear_boundary->get().find(nodes[3])->second; + vp4 = mf.linear_boundary->get().find(nodes[4])->second; + vp7 = mf.linear_boundary->get().find(nodes[7])->second; + + e0 = mf.circuit_.get_nth_out_edge(vp0.first, vp0.second); + e1 = mf.circuit_.get_nth_out_edge(vp1.first, vp1.second); + e2 = mf.circuit_.get_nth_out_edge(vp2.first, vp2.second); + Edge e3 = mf.circuit_.get_nth_out_edge(vp3.first, vp3.second); + e4 = mf.circuit_.get_nth_out_edge(vp4.first, vp4.second); + e7 = mf.circuit_.get_nth_out_edge(vp7.first, vp7.second); + + v0 = mf.circuit_.target(e0); + v1 = mf.circuit_.target(e1); + v2 = mf.circuit_.target(e2); + Vertex v3 = mf.circuit_.target(e3); + v4 = mf.circuit_.target(e4); + v7 = mf.circuit_.target(e7); + + REQUIRE(v0 == v2); + REQUIRE(v1 == v4); + REQUIRE(v3 == v7); + } +} +SCENARIO("Test MappingFrontier::add_qubit") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Circuit circ(3); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + std::map rename_map = { + {qubits[0], nodes[0]}, {qubits[1], nodes[1]}, {qubits[2], nodes[2]}}; + circ.rename_units(rename_map); + + MappingFrontier mf(circ); + mf.add_ancilla(nodes[3]); + + REQUIRE(circ.all_qubits().size() == 4); + REQUIRE(mf.circuit_.all_qubits().size() == 4); + REQUIRE(mf.linear_boundary->size() == 4); + REQUIRE(mf.linear_boundary->find(nodes[3]) != mf.linear_boundary->end()); +} + +SCENARIO("Test MappingFrontier::add_swap") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + mf.add_swap(nodes[0], nodes[1]); + + std::vector commands = mf.circuit_.get_commands(); + REQUIRE(commands.size() == 4); + 
Command swap_c = commands[0]; + unit_vector_t uids = {nodes[0], nodes[1]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + Command cx_c = commands[1]; + uids = {nodes[1], nodes[0]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[2]; + uids = {nodes[0], nodes[2]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[3]; + uids = {nodes[0], nodes[3]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CZ)); + + Node new_node("new_node", 8); + mf.add_swap(nodes[0], new_node); + + commands = mf.circuit_.get_commands(); + REQUIRE(commands.size() == 5); + swap_c = commands[0]; + uids = {nodes[0], nodes[1]}; + + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + swap_c = commands[1]; + uids = {nodes[0], new_node}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + + cx_c = commands[2]; + uids = {nodes[1], new_node}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[3]; + uids = {new_node, nodes[2]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CX)); + + cx_c = commands[4]; + uids = {new_node, nodes[3]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CZ)); +} + +SCENARIO("Test MappingFrontier::add_swap, classical wires edge case") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Circuit circ(4, 3); + std::vector qubits = circ.all_qubits(); + std::vector bits = circ.all_bits(); + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[2], qubits[3]}); + circ.add_measure(3, 0); + circ.add_conditional_gate( + OpType::Y, {}, {qubits[2]}, {bits[0], bits[1], bits[2]}, 3); + circ.add_conditional_gate(OpType::X, {}, {qubits[1]}, {bits[2]}, 1); + circ.add_op(OpType::CX, {qubits[2], qubits[0]}); + circ.add_op(OpType::CX, {qubits[3], qubits[0]}); + + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[0], nodes[2]}, {nodes[0], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + MappingFrontier mf(circ); + mf.advance_frontier_boundary(shared_arc); + mf.add_swap(qubits[0], qubits[2]); +} +SCENARIO("Test MappingFrontier::add_bridge") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[1], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + mf.add_bridge(nodes[0], nodes[2], nodes[1]); + + std::vector commands = mf.circuit_.get_commands(); + REQUIRE(commands.size() == 3); + Command bridge_c = commands[0]; + unit_vector_t uids = {nodes[0], nodes[2], nodes[1]}; + REQUIRE(bridge_c.get_args() == uids); + REQUIRE(*bridge_c.get_op_ptr() == *get_op_ptr(OpType::BRIDGE)); + + Command cx_c = commands[1]; + uids = {nodes[1], nodes[2]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == 
*get_op_ptr(OpType::CX)); + + cx_c = commands[2]; + uids = {nodes[1], nodes[3]}; + REQUIRE(cx_c.get_args() == uids); + REQUIRE(*cx_c.get_op_ptr() == *get_op_ptr(OpType::CZ)); +} +SCENARIO("Test MappingFrontier set_linear_boundary") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + circ.add_op(OpType::CX, {qubits[0], qubits[1]}); + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier mf(circ); + + unit_vertport_frontier_t copy; + for (const std::pair& pair : + mf.linear_boundary->get()) { + copy.insert({pair.first, pair.second}); + } + + VertPort vp0_c = copy.get().find(nodes[0])->second; + VertPort vp1_c = copy.get().find(nodes[1])->second; + VertPort vp2_c = copy.get().find(nodes[2])->second; + VertPort vp3_c = copy.get().find(nodes[3])->second; + + mf.advance_frontier_boundary(shared_arc); + + VertPort vp0_in = mf.linear_boundary->get().find(nodes[0])->second; + VertPort vp1_in = mf.linear_boundary->get().find(nodes[1])->second; + VertPort vp2_in = mf.linear_boundary->get().find(nodes[2])->second; + VertPort vp3_in = mf.linear_boundary->get().find(nodes[3])->second; + + REQUIRE(vp0_in.first != vp0_c.first); + REQUIRE(vp1_in.first != vp1_c.first); + REQUIRE(vp2_in.first != vp2_c.first); + REQUIRE(vp3_in.first != vp3_c.first); + + mf.set_linear_boundary(copy); + + vp0_in = mf.linear_boundary->get().find(nodes[0])->second; + vp1_in = mf.linear_boundary->get().find(nodes[1])->second; + vp2_in = mf.linear_boundary->get().find(nodes[2])->second; + vp3_in = mf.linear_boundary->get().find(nodes[3])->second; + + REQUIRE(vp0_in.first == vp0_c.first); + REQUIRE(vp1_in.first == vp1_c.first); + REQUIRE(vp2_in.first == vp2_c.first); + REQUIRE(vp3_in.first == vp3_c.first); +} + +SCENARIO("Test MappingFrontier maps checking") { + Circuit circ(3); + GIVEN("Valid maps") { + std::shared_ptr maps = std::make_shared(); + maps->initial.insert({Qubit(0), Qubit(0)}); + maps->final.insert({Qubit(0), Qubit(0)}); + maps->initial.insert({Qubit(1), Qubit(1)}); + maps->final.insert({Qubit(1), Qubit(1)}); + maps->initial.insert({Qubit(2), Qubit(2)}); + maps->final.insert({Qubit(2), Qubit(2)}); + MappingFrontier mf(circ, maps); + } + GIVEN("Maps with wrong size") { + std::shared_ptr maps = std::make_shared(); + maps->initial.insert({Qubit(0), Qubit(0)}); + maps->final.insert({Qubit(0), Qubit(0)}); + maps->initial.insert({Qubit(1), Qubit(1)}); + maps->final.insert({Qubit(1), Qubit(1)}); + REQUIRE_THROWS_AS(MappingFrontier(circ, maps), MappingFrontierError); + } + GIVEN("Uids not found in map") { + std::shared_ptr maps = std::make_shared(); + maps->initial.insert({Qubit(0), Node(0)}); + maps->final.insert({Qubit(0), Qubit(0)}); + maps->initial.insert({Qubit(1), Qubit(1)}); + maps->final.insert({Qubit(1), Qubit(1)}); + maps->initial.insert({Qubit(2), Qubit(2)}); + maps->final.insert({Qubit(2), Qubit(2)}); + + REQUIRE_THROWS_AS(MappingFrontier(circ, maps), MappingFrontierError); + } +} + +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MappingManager.cpp 
b/tket/tests/test_MappingManager.cpp
new file mode 100644
index 0000000000..226ca020e2
--- /dev/null
+++ b/tket/tests/test_MappingManager.cpp
@@ -0,0 +1,97 @@
+// Copyright 2019-2022 Cambridge Quantum Computing
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include
+#include
+#include
+#include
+
+#include "Mapping/MappingManager.hpp"
+
+namespace tket {
+
+class TokenSwappingTester : public RoutingMethod {
+ public:
+  TokenSwappingTester(){};
+
+  /**
+   * @param mapping_frontier Contains boundary of routed/unrouted circuit for
+   * modifying
+   * @param architecture Architecture providing physical constraints
+   * @return Logical to Physical mapping at boundary due to modification.
+   *
+   */
+  std::pair<bool, unit_map_t> routing_method(
+      MappingFrontier_ptr& /*mapping_frontier*/,
+      const ArchitecturePtr& /*architecture*/) const {
+    Node node0("test_node", 0), node1("test_node", 1), node2("test_node", 2);
+    return {true, {{node0, node1}, {node1, node2}, {node2, node0}}};
+  }
+};
+
+SCENARIO("Test MappingManager::route_circuit") {
+  Node node0("test_node", 0), node1("test_node", 1), node2("test_node", 2);
+  Architecture arc({{node0, node1}, {node1, node2}});
+  ArchitecturePtr shared_arc = std::make_shared<Architecture>(arc);
+  MappingManager test_mm(shared_arc);
+  std::vector<RoutingMethodPtr> test_vrm = {std::make_shared<RoutingMethod>()};
+  GIVEN("More qubits than architecture has qubits.") {
+    Circuit circ(5);
+    REQUIRE_THROWS_AS(
+        test_mm.route_circuit(circ, test_vrm), MappingManagerError);
+  }
+  GIVEN("Circuit unmodified.") {
+    Circuit circ(2);
+    REQUIRE(!test_mm.route_circuit(circ, test_vrm, false));
+  }
+  GIVEN("No method can route circuit.") {
+    Circuit circ(3);
+    std::vector<Qubit> qubits = circ.all_qubits();
+    circ.add_op<UnitID>(OpType::CX, {qubits[0], qubits[2]});
+    std::map<Qubit, Node> rename_map = {
+        {qubits[0], node0}, {qubits[1], node1}, {qubits[2], node2}};
+    circ.rename_units(rename_map);
+    REQUIRE_THROWS_AS(
+        test_mm.route_circuit(circ, test_vrm), MappingManagerError);
+  }
+  GIVEN("Method that invokes a permutation from token swapping stage.") {
+    Circuit circ(3);
+    std::vector<Qubit> qubits = circ.all_qubits();
+    circ.add_op<UnitID>(OpType::CX, {qubits[0], qubits[2]});
+    std::map<Qubit, Node> rename_map = {
+        {qubits[0], node0}, {qubits[1], node1}, {qubits[2], node2}};
+    circ.rename_units(rename_map);
+    std::vector<RoutingMethodPtr> test_ts_rm = {
+        std::make_shared<TokenSwappingTester>()};
+    test_mm.route_circuit(circ, test_ts_rm);
+
+    std::vector<Command> commands = circ.get_commands();
+    REQUIRE(commands.size() == 3);
+    Command c0 = commands[0];
+    unit_vector_t uid_swap_12 = {node1, node2};
+    REQUIRE(c0.get_args() == uid_swap_12);
+    REQUIRE(*c0.get_op_ptr() == *get_op_ptr(OpType::SWAP));
+
+    Command c1 = commands[1];
+    unit_vector_t uid_swap_01 = {node0, node1};
+    REQUIRE(c1.get_args() == uid_swap_01);
+    REQUIRE(*c1.get_op_ptr() == *get_op_ptr(OpType::SWAP));
+
+    Command c2 = commands[2];
+    unit_vector_t uid_cx_10 = {node1, node0};
+    REQUIRE(c2.get_args() == uid_cx_10);
+    REQUIRE(*c2.get_op_ptr() == *get_op_ptr(OpType::CX));
+  }
+}
+}  // namespace tket
diff --git a/tket/tests/test_MappingVerification.cpp
b/tket/tests/test_MappingVerification.cpp new file mode 100644 index 0000000000..1bb3439d18 --- /dev/null +++ b/tket/tests/test_MappingVerification.cpp @@ -0,0 +1,115 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/Verification.hpp" +#include "Placement/Placement.hpp" +#include "testutil.hpp" + +namespace tket { +SCENARIO( + "Test validity of circuit against architecture using " + "respects_connectivity_constraints method.", + "[routing]") { + Architecture arc({{1, 0}, {1, 2}}); + + GIVEN("A simple CX circuit and a line_placement map.") { + Circuit circ(5); + add_2qb_gates(circ, OpType::CX, {{0, 1}, {0, 3}, {2, 4}, {1, 4}, {0, 4}}); + Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); + LinePlacement lp_obj(test_arc); + lp_obj.place(circ); + MappingManager mm(std::make_shared(test_arc)); + + REQUIRE( + mm.route_circuit(circ, {std::make_shared()})); + CHECK(respects_connectivity_constraints(circ, test_arc, false)); + } + GIVEN("A failing case, undirected") { + Circuit circ(3); + circ.add_op(OpType::CX, {0, 2}); + reassign_boundary(circ); + REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A working case, undirected") { + Circuit circ(3); + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A failing case, directed") { + Circuit circ(3); + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A working case, directed") { + Circuit circ(3); + circ.add_op(OpType::CX, {1, 0}); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A failing case, undirected, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A working case, undirected, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 2}); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A failing case, directed, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {1, 0}); + reassign_boundary(circ); + 
REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A working case, directed, with SWAP") { + Circuit circ(3); + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 1}); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_MultiGateReorder.cpp b/tket/tests/test_MultiGateReorder.cpp new file mode 100644 index 0000000000..3c904306a6 --- /dev/null +++ b/tket/tests/test_MultiGateReorder.cpp @@ -0,0 +1,431 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include + +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/MultiGateReorder.hpp" +#include "Predicates/Predicates.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" + +namespace tket { +SCENARIO("Reorder circuits") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Simple CZ circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 2; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } + + GIVEN("Simple CZ circuit 2.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[1], qubits[0]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + 
circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[3], qubits[0]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[3], qubits[2]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 4; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } + GIVEN("Simple CZ circuit with single_qs.") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[1], qubits[0]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::Rz, 0.5, {qubits[0]}); + circ.add_op(OpType::Rz, 0.5, {qubits[2]}); + circ.add_op(OpType::Rz, 0.5, {qubits[3]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::Measure, {qubits[2], Bit(0)}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[3], qubits[0]}); + // Physically valid operations + circ.add_op(OpType::H, {qubits[3]}); + circ.add_op(OpType::CZ, {qubits[3], qubits[2]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 2; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } + + GIVEN("Circuit with multi qubit gates.") { + Circuit circ(4, 1); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[1], qubits[0]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + // Physically valid operations + circ.add_op(OpType::BRIDGE, {qubits[1], qubits[2], qubits[3]}); + circ.add_op(OpType::Rx, 0.5, {qubits[3]}); + circ.add_op(OpType::CX, {qubits[2], qubits[3]}); + circ.add_op(OpType::Rz, 0.5, {qubits[0]}); + circ.add_op(OpType::CRz, 0.5, {qubits[0], qubits[1]}); + circ.add_op(OpType::ZZPhase, 0.2, {qubits[0], qubits[1]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[3], qubits[0]}); + // Physically valid operations + circ.add_op(OpType::H, {qubits[3]}); + circ.add_op(OpType::CZ, 
{qubits[3], qubits[2]}); + + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(20, 20); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 6; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } +} + +SCENARIO("Reorder circuits with limited search space") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Simple CZ circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorder mr(shared_arc, mf); + mr.solve(3, 3); + // Check only the first valid CZ get commuted to the front + std::vector commands = circ.get_commands(); + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[0].get_op_ptr(), + {Node(commands[0].get_args()[0]), Node(commands[0].get_args()[1])})); + REQUIRE(!mf->valid_boundary_operation( + shared_arc, commands[0].get_op_ptr(), + {Node(commands[1].get_args()[0]), Node(commands[1].get_args()[1])})); + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + } +} + +SCENARIO("Test MultiGateReorderRoutingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + GIVEN("Simple CZ circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + // Physically invalid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + circ.add_op(OpType::CZ, {qubits[2], qubits[3]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], 
nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + Circuit circ_copy(circ); + MappingFrontier_ptr mf = std::make_shared(circ); + mf->advance_frontier_boundary(shared_arc); + MultiGateReorderRoutingMethod mrrm; + + std::pair bool_init_map = + mrrm.routing_method(mf, shared_arc); + REQUIRE(bool_init_map.first); + REQUIRE(bool_init_map.second.size() == 0); + std::vector commands = circ.get_commands(); + for (unsigned i = 0; i < 5; i++) { + std::vector nodes; + for (auto arg : commands[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(mf->valid_boundary_operation( + shared_arc, commands[i].get_op_ptr(), nodes)); + } + const auto u = tket_sim::get_unitary(circ); + const auto u1 = tket_sim::get_unitary(circ_copy); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u, u1, tket_sim::MatrixEquivalence::EQUAL)); + + // Test with limits + Circuit circ2(circ_copy); + + MappingFrontier_ptr mf2 = std::make_shared(circ2); + mf2->advance_frontier_boundary(shared_arc); + MultiGateReorderRoutingMethod mrrm2(4, 4); + + std::pair bool_init_map2 = + mrrm2.routing_method(mf2, shared_arc); + REQUIRE(bool_init_map2.first); + REQUIRE(bool_init_map2.second.size() == 0); + std::vector commands2 = circ2.get_commands(); + for (unsigned i = 0; i < 4; i++) { + std::vector nodes; + for (auto arg : commands2[i].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(mf2->valid_boundary_operation( + shared_arc, commands2[i].get_op_ptr(), nodes)); + } + std::vector nodes; + for (auto arg : commands2[4].get_args()) { + nodes.push_back(Node(arg)); + } + REQUIRE(!mf2->valid_boundary_operation( + shared_arc, commands2[4].get_op_ptr(), nodes)); + const auto u2 = tket_sim::get_unitary(circ2); + REQUIRE(tket_sim::compare_statevectors_or_unitaries( + u2, u1, tket_sim::MatrixEquivalence::EQUAL)); + } +} + +SCENARIO("Test MappingManager with MultiGateReorderRoutingMethod") { + std::vector nodes = { + Node("test_node", 0), Node("test_node", 1), Node("test_node", 2), + Node("node_test", 3)}; + + // n0 -- n1 -- n2 -- n3 + Architecture architecture( + {{nodes[0], nodes[1]}, {nodes[1], nodes[2]}, {nodes[2], nodes[3]}}); + ArchitecturePtr shared_arc = std::make_shared(architecture); + + GIVEN("Simple CZ, CX circuit.") { + Circuit circ(4); + std::vector qubits = circ.all_qubits(); + + // Physically invalid operations + circ.add_op(OpType::CX, {qubits[0], qubits[2]}); + circ.add_op(OpType::CX, {qubits[1], qubits[3]}); + // Physically valid operations + circ.add_op(OpType::CX, {qubits[1], qubits[2]}); + circ.add_op(OpType::CZ, {qubits[0], qubits[1]}); + std::map rename_map = { + {qubits[0], nodes[0]}, + {qubits[1], nodes[1]}, + {qubits[2], nodes[2]}, + {qubits[3], nodes[3]}}; + circ.rename_units(rename_map); + MappingFrontier_ptr mf = std::make_shared(circ); + MappingManager mm(shared_arc); + // MultiGateReorderRoutingMethod should first commute the last two gates + // then only one swap is needed. 
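    // Annotation (not part of the patch), expanding on the comment above:
    // with the initial assignment q0->n0, q1->n1, q2->n2, q3->n3, the later
    // gates CX{q1, q2} and CZ{q0, q1} already act on adjacent nodes, so
    // MultiGateReorderRoutingMethod can commute them to the frontier. A
    // single SWAP on {n1, n2} then places q2 on n1 and q1 on n2, after which
    // CX{q0, q2} acts on the edge {n0, n1} and CX{q1, q3} on {n2, n3}; this
    // is what the checks below (exactly one SWAP, found at commands[2])
    // verify.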
+ std::vector vrm = { + std::make_shared(), + std::make_shared(10)}; + bool res = mm.route_circuit(circ, vrm); + PredicatePtr routed_correctly = + std::make_shared(architecture); + REQUIRE(routed_correctly->verify(circ)); + REQUIRE(circ.count_gates(OpType::SWAP) == 1); + std::vector commands = circ.get_commands(); + REQUIRE(commands.size() == 5); + Command swap_c = commands[2]; + unit_vector_t uids = {nodes[1], nodes[2]}; + REQUIRE(swap_c.get_args() == uids); + REQUIRE(*swap_c.get_op_ptr() == *get_op_ptr(OpType::SWAP)); + } +} + +SCENARIO("Test JSON serialisation for MultiGateReorderRoutingMethod") { + GIVEN("MultiGateReorderRoutingMethod") { + nlohmann::json j_rm; + j_rm["name"] = "MultiGateReorderRoutingMethod"; + j_rm["depth"] = 3; + j_rm["size"] = 4; + MultiGateReorderRoutingMethod rm_loaded = + MultiGateReorderRoutingMethod::deserialize(j_rm); + nlohmann::json j_rm_serialised = rm_loaded.serialize(); + REQUIRE(j_rm == j_rm_serialised); + } + + GIVEN("RoutingMethod vector") { + nlohmann::json j_rms = { + { + {"name", "MultiGateReorderRoutingMethod"}, + {"depth", 3}, + {"size", 4}, + }, + { + {"name", "LexiRouteRoutingMethod"}, + {"depth", 3}, + }}; + std::vector rms = + j_rms.get>(); + nlohmann::json j_rms_serialised = rms; + REQUIRE(j_rms == j_rms_serialised); + } + + GIVEN("RoutingMethod vector II, Lexi and AAS") { + nlohmann::json j_rms = { + { + {"name", "MultiGateReorderRoutingMethod"}, + {"depth", 3}, + {"size", 4}, + }, + { + {"name", "LexiRouteRoutingMethod"}, + {"depth", 3}, + }, + { + {"name", "AASRouteRoutingMethod"}, + {"cnotsynthtype", 2}, + {"aaslookahead", 1}, + }, + { + {"name", "AASLabellingMethod"}, + }}; + std::vector rms = + j_rms.get>(); + nlohmann::json j_rms_serialised = rms; + REQUIRE(j_rms == j_rms_serialised); + } +} +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_Predicates.cpp b/tket/tests/test_Predicates.cpp index 95d57647c5..ef798cacec 100644 --- a/tket/tests/test_Predicates.cpp +++ b/tket/tests/test_Predicates.cpp @@ -13,7 +13,9 @@ // limitations under the License. 
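[Reviewer note, not part of the patch.] The JSON scenarios above exercise a serialise/deserialise round trip for routing methods. A minimal sketch of that round trip, assuming the headers used in test_MultiGateReorder.cpp and that RoutingMethodPtr is the element type behind the template brackets stripped from this rendering of the diff:

    // Build the JSON form shown in the test and recover the method from it.
    nlohmann::json j;
    j["name"] = "MultiGateReorderRoutingMethod";
    j["depth"] = 3;  // bounds the reordering search; exact semantics are defined by MultiGateReorder
    j["size"] = 4;
    MultiGateReorderRoutingMethod rm =
        MultiGateReorderRoutingMethod::deserialize(j);
    // The test requires the round trip to be lossless:
    REQUIRE(rm.serialize() == j);
    // Vectors of methods round-trip the same way via nlohmann's get<>():
    // std::vector<RoutingMethodPtr> rms = j_rms.get<std::vector<RoutingMethodPtr>>();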
#include +#include +#include "Placement/Placement.hpp" #include "Predicates/CompilationUnit.hpp" #include "Predicates/Predicates.hpp" #include "testutil.hpp" @@ -185,10 +187,14 @@ SCENARIO("Test routing-related predicates' meet and implication") { Node n0("test", 0); Node n1("test", 1); Node n2("test", 2); + Node n3("test", 3); Architecture arc1({{n0, n1}, {n1, n2}}); Architecture arc2({{n0, n1}, {n1, n2}, {n0, n2}}); Architecture arc3({{n0, n2}, {n0, n1}}); Architecture arc4({{n2, n0}, {n0, n1}}); + Architecture arc5({n0, n1, n2, n3}); + arc5.add_connection(n0, n1); + arc5.add_connection(n1, n2); Circuit circ(3); circ.add_op(OpType::CX, {0, 1}); @@ -204,11 +210,17 @@ SCENARIO("Test routing-related predicates' meet and implication") { PredicatePtr con2 = std::make_shared(arc2); PredicatePtr con3 = std::make_shared(arc3); PredicatePtr con4 = std::make_shared(arc4); + PredicatePtr con5 = std::make_shared(arc5); WHEN("Test implies") { REQUIRE(con1->implies(*con2)); REQUIRE(con4->implies(*con3)); // directedness doesn't matter REQUIRE_FALSE(con1->implies(*con3)); } + WHEN("Test implies (isolated nodes)") { + // https://github.com/CQCL/tket/issues/88 + REQUIRE(con1->implies(*con5)); + REQUIRE_FALSE(con5->implies(*con1)); + } WHEN("Test meet") { PredicatePtr meet_a = con1->meet(*con2); REQUIRE(meet_a->verify(circ)); diff --git a/tket/tests/test_Rebase.cpp b/tket/tests/test_Rebase.cpp index 18c0091869..be270515ac 100644 --- a/tket/tests/test_Rebase.cpp +++ b/tket/tests/test_Rebase.cpp @@ -16,6 +16,7 @@ #include #include "Circuit/Boxes.hpp" +#include "Circuit/CircPool.hpp" #include "CircuitsForTesting.hpp" #include "Simulation/CircuitSimulator.hpp" #include "Simulation/ComparisonFunctions.hpp" @@ -41,9 +42,8 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::S, OpType::V, OpType::Rx}; - Transform t = Transforms::rebase_factory(multiqs, blank, singleqs, blanker); + OpTypeSet gates = {OpType::S, OpType::V, OpType::Rx, OpType::CX}; + Transform t = Transforms::rebase_factory(gates, blank, blanker); REQUIRE(!t.apply(c)); REQUIRE(copy == c); } @@ -57,9 +57,8 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::S, OpType::V, OpType::H}; - Transform t = Transforms::rebase_factory(multiqs, blank, singleqs, blanker); + Transform t = Transforms::rebase_factory( + {OpType::S, OpType::V, OpType::H, OpType::CX}, blank, blanker); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::CZ) == 0); REQUIRE(c.count_gates(OpType::CX) == 1); @@ -79,9 +78,8 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CRz}; - OpTypeSet singleqs = {OpType::S, OpType::V, OpType::H}; - Transform t = Transforms::rebase_factory(multiqs, cx, singleqs, blanker); + Transform t = Transforms::rebase_factory( + {OpType::S, OpType::V, OpType::H, OpType::CRz}, cx, blanker); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::CZ) == 1); REQUIRE(c.count_gates(OpType::CX) == 0); @@ -101,9 +99,9 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CZ}; - OpTypeSet singleqs = {OpType::S, OpType::X, OpType::H, OpType::Sdg}; - 
Transform t = Transforms::rebase_factory(multiqs, cx, singleqs, blanker); + OpTypeSet gateset = { + OpType::S, OpType::X, OpType::H, OpType::Sdg, OpType::CZ}; + Transform t = Transforms::rebase_factory(gateset, cx, blanker); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::CZ) == 1); REQUIRE(c.count_gates(OpType::CX) == 0); @@ -123,9 +121,8 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::S, OpType::V, OpType::H}; - Transform t = Transforms::rebase_factory(multiqs, blank, singleqs, blanker); + OpTypeSet gateset = {OpType::S, OpType::V, OpType::H, OpType::CX}; + Transform t = Transforms::rebase_factory(gateset, blank, blanker); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::CZ) == 0); REQUIRE(c.count_gates(OpType::CX) == 6); @@ -143,9 +140,8 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::S, OpType::V, OpType::H}; - Transform t = Transforms::rebase_factory(multiqs, blank, singleqs, blanker); + OpTypeSet gateset = {OpType::S, OpType::V, OpType::H, OpType::CX}; + Transform t = Transforms::rebase_factory(gateset, blank, blanker); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::CX) == 4); const StateVector s1 = tket_sim::get_statevector(c); @@ -163,9 +159,8 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::S, OpType::V, OpType::H}; - Transform t = Transforms::rebase_factory(multiqs, blank, singleqs, blanker); + OpTypeSet gateset = {OpType::S, OpType::V, OpType::H, OpType::CX}; + Transform t = Transforms::rebase_factory(gateset, blank, blanker); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::CX) == 4); StateVector s1 = tket_sim::get_statevector(c); @@ -187,9 +182,8 @@ SCENARIO("Building rebases with rebase_factory") { auto blanker = [](const Expr&, const Expr&, const Expr&) { return Circuit(1); }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::TK1}; - Transform t = Transforms::rebase_factory(multiqs, blank, singleqs, tk1_map); + OpTypeSet gateset = {OpType::TK1, OpType::CX}; + Transform t = Transforms::rebase_factory(gateset, blank, tk1_map); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::T) == 0); REQUIRE(c.count_gates(OpType::Rx) == 0); @@ -212,10 +206,8 @@ SCENARIO("Building rebases with rebase_factory") { u.add_op(OpType::Rz, alpha, {0}); return u; }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::Rz, OpType::Rx}; - Transform t = - Transforms::rebase_factory(multiqs, blank, singleqs, rzrx_map); + OpTypeSet gateset = {OpType::Rz, OpType::Rx, OpType::CX}; + Transform t = Transforms::rebase_factory(gateset, blank, rzrx_map); REQUIRE(t.apply(c)); REQUIRE(c.count_gates(OpType::U3) == 0); REQUIRE(c.count_gates(OpType::Rx) == 2); @@ -238,10 +230,8 @@ SCENARIO("Building rebases with rebase_factory") { Transforms::remove_redundancies().apply(u); return u; }; - OpTypeSet multiqs = {OpType::CX}; - OpTypeSet singleqs = {OpType::Rz, OpType::Rx}; - Transform t = - Transforms::rebase_factory(multiqs, blank, singleqs, rzrx_map); + OpTypeSet gateset = {OpType::Rz, OpType::Rx, OpType::CX}; + Transform t = Transforms::rebase_factory(gateset, blank, rzrx_map); REQUIRE(t.apply(c)); 
REQUIRE(c.count_gates(OpType::T) == 0); REQUIRE(c.count_gates(OpType::U3) == 0); @@ -412,8 +402,7 @@ SCENARIO("Check each Clifford case for tk1_to_rzh") { Circuit correct(1); correct.add_op( OpType::TK1, {test.alpha, test.beta, test.gamma}, {0}); - Circuit result = - Transforms::tk1_to_rzh(test.alpha, test.beta, test.gamma); + Circuit result = CircPool::tk1_to_rzh(test.alpha, test.beta, test.gamma); REQUIRE(result.n_gates() == test.expected_gates); REQUIRE(test_unitary_comparison(correct, result)); } @@ -466,8 +455,8 @@ SCENARIO("Check cases for tk1_to_rzsx") { Circuit correct(1); correct.add_op( OpType::TK1, {test.alpha, test.beta, test.gamma}, {0}); - Circuit result = - Transforms::tk1_to_rzsx(test.alpha, test.beta, test.gamma); + Circuit result = CircPool::tk1_to_rzsx(test.alpha, test.beta, test.gamma); + Transforms::remove_redundancies().apply(result); REQUIRE(result.n_gates() == test.expected_gates); REQUIRE(test_unitary_comparison(correct, result)); } @@ -476,8 +465,7 @@ SCENARIO("Check cases for tk1_to_rzsx") { Circuit correct(1); correct.add_op( OpType::TK1, {test.alpha, test.beta, test.gamma}, {0}); - Circuit result = - Transforms::tk1_to_rzsx(test.alpha, test.beta, test.gamma); + Circuit result = CircPool::tk1_to_rzsx(test.alpha, test.beta, test.gamma); REQUIRE(result.n_gates() == test.expected_gates); } } diff --git a/tket/tests/test_Routing.cpp b/tket/tests/test_Routing.cpp deleted file mode 100644 index c734de1f71..0000000000 --- a/tket/tests/test_Routing.cpp +++ /dev/null @@ -1,2712 +0,0 @@ -// Copyright 2019-2022 Cambridge Quantum Computing -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include - -#include "Characterisation/DeviceCharacterisation.hpp" -#include "Circuit/Circuit.hpp" -#include "OpType/OpType.hpp" -#include "Predicates/CompilerPass.hpp" -#include "Predicates/PassGenerators.hpp" -#include "Predicates/Predicates.hpp" -#include "Routing/Routing.hpp" -#include "Routing/Verification.hpp" -#include "Simulation/CircuitSimulator.hpp" -#include "Simulation/ComparisonFunctions.hpp" -#include "Transformations/BasicOptimisation.hpp" -#include "Transformations/Decomposition.hpp" -#include "Transformations/OptimisationPass.hpp" -#include "Transformations/Rebase.hpp" -#include "Transformations/Transform.hpp" -#include "Utils/HelperFunctions.hpp" -#include "testutil.hpp" - -namespace tket { - -using Connection = Architecture::Connection; - -Interactions RoutingTester::get_interaction(const RoutingFrontier &sf) { - return router->generate_interaction_frontier(sf); -} - -// Wrappers of private methods for testing? 
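[Reviewer note, not part of the patch.] Two things happen around this point in the diff. First, the test_Rebase.cpp hunks above track an API change: the old rebase_factory(multiqs, cx_replacement, singleqs, tk1_replacement) overload is replaced by one taking a single OpTypeSet containing both the single- and multi-qubit target gates, and tk1_to_rzh / tk1_to_rzsx now live in CircPool rather than Transforms. A minimal sketch of a call against the new signature (the names of the two replacement arguments are illustrative, not taken from the patch):

    // Target gate set now mixes 1- and 2-qubit gates in one OpTypeSet.
    OpTypeSet gateset = {OpType::Rz, OpType::Rx, OpType::CX};
    // cx_replacement: 2-qubit circuit implementing CX in the target gates;
    // tk1_replacement: maps TK1 angles (alpha, beta, gamma) to a 1-qubit circuit.
    Transform t =
        Transforms::rebase_factory(gateset, cx_replacement, tk1_replacement);
    t.apply(circ);

Second, test_Routing.cpp, which tested the old Routing class and its RoutingTester wrappers, is deleted in its entirety; the connectivity checks it contained reappear in the new test_MappingVerification.cpp above, now driven by MappingManager and LexiRouteRoutingMethod instead of Routing::solve.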
-void RoutingTester::increment_distance( - graphs::dist_vec &new_dist_vector, const Swap &pair, int increment) const { - router->increment_distance(new_dist_vector, pair, increment); -} - -graphs::dist_vec RoutingTester::generate_distance_vector( - const Interactions &inter) const { - return router->generate_distance_vector(inter); -} - -graphs::dist_vec RoutingTester::update_distance_vector( - const Swap &nodes, graphs::dist_vec new_dist_vector, - const Interactions &inte) const { - return router->update_distance_vector(nodes, new_dist_vector, inte); -} - -const std::pair RoutingTester::pair_dists( - const Node &n1, const Node &p1, const Node &n2, const Node &p2) const { - return router->pair_dists(n1, p1, n2, p2); -} - -bool RoutingTester::swap_decreases( - const Swap &nodes, const Interactions &inte) const { - return router->swap_decreases(nodes, inte); -} - -std::vector RoutingTester::candidate_swaps( - const std::vector &trial_edges, - const Interactions &inte) const { - return router->candidate_swaps(trial_edges, inte); -} - -std::vector RoutingTester::cowtan_et_al_heuristic( - std::vector &candidate_swaps, const graphs::dist_vec &base_dists, - const Interactions &interac) const { - return router->cowtan_et_al_heuristic(candidate_swaps, base_dists, interac); -} - -void RoutingTester::update_qmap(qubit_bimap_t &map, const Swap &swap) { - router->update_qmap(map, swap); -} - -std::vector RoutingTester::path_to_swaps( - const std::vector &path) const { - return router->path_to_swaps(path); -} - -qubit_bimap_t default_qubit_map(const Circuit &circ) { - qubit_bimap_t qmap; - unsigned node = 0; - for (const Qubit &qb : circ.all_qubits()) { - qmap.insert({qb, Node(node)}); - node++; - } - return qmap; -} -qubit_bimap_t RoutingTester::set_default_initial_map( - std::optional canonical_node_order) { - qubit_bimap_t qmap; - unsigned node = 0; - for (const Qubit &qb : router->circ_.all_qubits()) { - if (canonical_node_order.has_value()) { - qmap.insert({qb, canonical_node_order->at(node)}); - } else { - qmap.insert({qb, Node(node)}); - } - node++; - } - router->init_map = qmap; - router->qmap = qmap; - return qmap; -} - -void RoutingTester::initialise_slicefrontier() { - router->slice_frontier_.init(); -} - -void RoutingTester::add_distributed_cx( - const Node &control_node, const Node &target_node, - const Node ¢ral_node) { - router->add_distributed_cx(control_node, target_node, central_node); -} - -std::pair, std::pair> -RoutingTester::check_distributed_cx(const Swap &nodes) { - return router->check_distributed_cx(nodes); -} - -void RoutingTester::advance_frontier() { router->advance_frontier(); } - -void RoutingTester::set_interaction() { - router->interaction = - router->generate_interaction_frontier(router->slice_frontier_); -} -void RoutingTester::set_qmap(qubit_bimap_t _qmap) { router->qmap = _qmap; } -void RoutingTester::set_config(const RoutingConfig &_config) { - router->config_ = _config; -} -void RoutingTester::next_sf(RoutingFrontier &sf) { sf.next_slicefrontier(); } -Circuit *RoutingTester::get_circ() { return &(router->circ_); } - -namespace test_Routing { - -SCENARIO( - "Test validity of circuit against architecture using " - "respects_connectivity_constraints method.", - "[routing]") { - Architecture arc({{1, 0}, {1, 2}}); - - GIVEN("A simple CX circuit and a line_placement map.") { - tket::Circuit circ(5); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {0, 3}, {2, 4}, {1, 4}, {0, 4}}); - tket::Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); - LinePlacement 
lp_obj(test_arc); - // qubit_mapping_t lm = lp_obj.place_get_maps(circ)[0]; - lp_obj.place(circ); - tket::Routing router(circ, test_arc); - std::pair outcirc = router.solve(); - REQUIRE(outcirc.second == true); - CHECK(respects_connectivity_constraints(outcirc.first, test_arc, false)); - } - GIVEN("A failing case, undirected") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 2}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A working case, undirected") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A failing case, directed") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A working case, directed") { - Circuit circ(3); - circ.add_op(OpType::CX, {1, 0}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A failing case, undirected, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A working case, undirected, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 2}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 2}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A failing case, directed, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {1, 0}); - reassign_boundary(circ); - REQUIRE_FALSE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A working case, directed, with SWAP") { - Circuit circ(3); - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 1}); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } -} - -SCENARIO("Test decompose_SWAP_to_CX pass", "[routing]") { - Architecture arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 0}}); - GIVEN("A single SWAP gate. 
Finding if correct number of vertices added") { - Circuit circ(5); - circ.add_op(OpType::SWAP, {0, 1}); - int original_vertices = circ.n_vertices(); - reassign_boundary(circ); - Transforms::decompose_SWAP_to_CX().apply(circ); - int decompose_vertices = circ.n_vertices(); - REQUIRE(decompose_vertices - original_vertices == 2); - REQUIRE(respects_connectivity_constraints(circ, arc, false)); - } - GIVEN("A single SWAP gate, finding if correct path is preserved.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {0, 1}); - // check output boundary - Vertex boundary_0 = circ.get_out(Qubit(0)); - Vertex boundary_1 = circ.get_out(Qubit(1)); - Transforms::decompose_SWAP_to_CX().apply(circ); - REQUIRE(circ.get_out(Qubit(0)) == boundary_0); - REQUIRE(circ.get_out(Qubit(1)) == boundary_1); - // check output boundary is the same - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {0, 1}); - circ.add_op(OpType::CX, {0, 1}); - Transforms::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {0, 1}); - circ.add_op(OpType::CX, {1, 0}); - Transforms::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite SWAP.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - circ.add_op(OpType::CX, {0, 1}); - Transforms::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case, opposite SWAP.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - circ.add_op(OpType::CX, {1, 0}); - Transforms::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[2].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite SWAP, pre CX.") { - Circuit circ(2); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::SWAP, {1, 0}); - Transforms::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[1].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case, opposite SWAP, pre CX.") { - Circuit circ(2); - circ.add_op(OpType::CX, {1, 0}); - circ.add_op(OpType::SWAP, {1, 0}); - Transforms::decompose_SWAP_to_CX().apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[1].get_args() == cor); - } - GIVEN( - "A circuit that facilitates some CX annihilation for an undirected " - "architecture, opposite case, opposite SWAP, pre CX, directed bool " - "on.") { - Circuit circ(2); - circ.add_op(OpType::CX, {1, 0}); - circ.add_op(OpType::SWAP, {1, 0}); - reassign_boundary(circ); - 
Transforms::decompose_SWAP_to_CX(arc).apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[1].get_args() == cor); - } - GIVEN("A circuit that with no CX gates, but with directed architecture.") { - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - reassign_boundary(circ); - Transforms::decompose_SWAP_to_CX(arc).apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[0], all[1]}; - REQUIRE(circ.get_commands()[0].get_args() == cor); - } - GIVEN( - "A circuit that with no CX gates, but with directed architecture, " - "opposite case.") { - Architecture dummy_arc({{1, 0}}); - Circuit circ(2); - circ.add_op(OpType::SWAP, {1, 0}); - reassign_boundary(circ); - Transforms::decompose_SWAP_to_CX(dummy_arc).apply(circ); - qubit_vector_t all = circ.all_qubits(); - unit_vector_t cor = {all[1], all[0]}; - REQUIRE(circ.get_commands()[0].get_args() == cor); - } - // TEST CIRCUIT - Circuit circ(10); - int count = 0; - for (unsigned x = 0; x < 10; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - count += 2; - if (x % 2) { - add_2qb_gates(circ, OpType::SWAP, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::SWAP, {{y, x}, {y, y + 1}}); - } - } - } - - GIVEN("A network of SWAP gates.") { - int original_vertices = circ.n_vertices(); - std::vector original_boundary; - for (unsigned i = 0; i < circ.n_qubits(); i++) { - original_boundary.push_back(circ.get_out(Qubit(i))); - } - Transforms::decompose_SWAP_to_CX().apply(circ); - int decompose_vertices = circ.n_vertices(); - for (unsigned i = 0; i < circ.n_qubits(); i++) { - REQUIRE(original_boundary[i] == circ.get_out(Qubit(i))); - } - REQUIRE(decompose_vertices - original_vertices == 2 * count); - } - GIVEN("A routed network of SWAP gates.") { - SquareGrid grid(2, 5); - Routing router(circ, grid); - std::pair output = router.solve(); - REQUIRE(output.second); - circ = output.first; - Transforms::decompose_SWAP_to_CX().apply(circ); - REQUIRE(respects_connectivity_constraints(circ, grid, false, true)); - GIVEN("Directed CX gates") { - Transforms::decompose_SWAP_to_CX().apply(output.first); - Transforms::decompose_BRIDGE_to_CX().apply(output.first); - Transforms::decompose_CX_directed(grid).apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, grid, true)); - } - } -} - -SCENARIO("Test redirect_CX_gates pass", "[routing]") { - Architecture arc({{1, 0}, {1, 2}}); - GIVEN("A circuit that requires no redirection.") { - Circuit circ(3); - add_2qb_gates(circ, OpType::CX, {{1, 0}, {1, 2}}); - reassign_boundary(circ); - Transforms::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A circuit that requires redirection.") { - Circuit circ(3); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {2, 1}}); - reassign_boundary(circ); - Transforms::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A circuit that requires no redirection, with SWAP.") { - Circuit circ(3); - - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {0, 1}); - - swap_v = circ.add_op(OpType::SWAP, {0, 2}); - swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {2, 1}); - 
reassign_boundary(circ); - Transforms::decompose_SWAP_to_CX(arc).apply(circ); - Transforms::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A circuit that requires redirection, with SWAP.") { - Circuit circ(3); - - Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); - EdgeVec swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {1, 0}); - - swap_v = circ.add_op(OpType::SWAP, {0, 2}); - swap_outs = circ.get_all_out_edges(swap_v); - circ.dag[swap_outs[0]].ports.first = 1; - circ.dag[swap_outs[1]].ports.first = 0; - - circ.add_op(OpType::CX, {1, 2}); - - reassign_boundary(circ); - Transforms::decompose_SWAP_to_CX(arc).apply(circ); - Transforms::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true)); - } - GIVEN("A complicated circuit of CX gates, routed.") { - Circuit circ(12); - SquareGrid grid(3, 4); - - for (unsigned x = 0; x < 12; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - Routing route(circ, grid); - std::pair outs = route.solve(); - REQUIRE(outs.second == true); - circ = outs.first; - Transforms::decompose_BRIDGE_to_CX().apply(circ); - Transforms::decompose_SWAP_to_CX(arc).apply(circ); - Transforms::decompose_CX_directed(grid).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, grid, true)); - } -} - -SCENARIO("Test RoutingFrontiers and interaction vectors", "[routing]") { - GIVEN("A simple circuit") { - Circuit incirc(4); - Vertex v1 = incirc.add_op(OpType::X, {0}); - Vertex v8 = incirc.add_op(OpType::S, {3}); - Vertex v9 = incirc.add_op(OpType::T, {3}); - Vertex v2 = incirc.add_op(OpType::CX, {0, 1}); - Vertex v3 = incirc.add_op(OpType::CY, {2, 3}); - Vertex v4 = incirc.add_op(OpType::H, {0}); - Vertex v10 = incirc.add_op(OpType::X, {0}); - Vertex v11 = incirc.add_op(OpType::S, {1}); - Vertex v12 = incirc.add_op(OpType::Z, {3}); - Vertex v13 = incirc.add_op(OpType::Y, {2}); - Vertex v14 = incirc.add_op(OpType::T, {1}); - Vertex v5 = incirc.add_op(OpType::CZ, {0, 2}); - Vertex v6 = incirc.add_op(OpType::Y, {0}); - Vertex v7 = incirc.add_op(OpType::CX, {3, 1}); - - // Ring of size 4 - RingArch arc(4); - node_vector_t ring_nodes = arc.get_all_nodes_vec(); - // Create Routing Object - Routing router(incirc, arc); - RoutingTester tester(&router); - Circuit *circ = tester.get_circ(); - RoutingFrontier sf1 = router.get_slicefrontier(); - Qubit qb0(0); - Qubit qb1(1); - Qubit qb2(2); - Qubit qb3(3); - qubit_bimap_t qm; - for (unsigned i = 0; i < 4; ++i) { - qm.insert({Qubit(i), ring_nodes[i]}); - } - tester.set_qmap(qm); - WHEN("First interaction vector is generated") { - Interactions inte = tester.get_interaction(sf1); - THEN("Interaction vector is correct") { - CHECK(inte[ring_nodes.at(0)] == ring_nodes.at(1)); - CHECK(inte[ring_nodes.at(1)] == ring_nodes.at(0)); - CHECK(inte[ring_nodes.at(3)] == ring_nodes.at(2)); - CHECK(inte[ring_nodes.at(2)] == ring_nodes.at(3)); - REQUIRE(inte.size() == 4); - } - } - WHEN("One operation is completed") { - Edge new_0 = circ->skip_irrelevant_edges(circ->get_all_out_edges(v2)[0]); - Edge new_1 = circ->skip_irrelevant_edges(circ->get_all_out_edges(v2)[1]); - sf1.quantum_in_edges->replace( - sf1.quantum_in_edges->find(qb0), {qb0, new_0}); - sf1.quantum_in_edges->replace( - 
sf1.quantum_in_edges->find(qb1), {qb1, new_1}); - CutFrontier next_cut = circ->next_cut( - sf1.quantum_in_edges, std::make_shared()); - - sf1.slice = next_cut.slice; - sf1.quantum_out_edges = next_cut.u_frontier; - Interactions inte = tester.get_interaction(sf1); - THEN("Interaction vector is updated") { - CHECK(inte[ring_nodes.at(0)] == ring_nodes.at(0)); - CHECK(inte[ring_nodes.at(1)] == ring_nodes.at(1)); - CHECK(inte[ring_nodes.at(3)] == ring_nodes.at(2)); - CHECK(inte[ring_nodes.at(2)] == ring_nodes.at(3)); - REQUIRE(inte.size() == 4); - } - } - - WHEN("Next RoutingFrontier is generated") { - sf1.next_slicefrontier(); - THEN("The RoutingFrontier is correct") { - REQUIRE(sf1.slice->size() == 2); - CHECK( - circ->get_Op_ptr_from_Vertex(sf1.slice->at(0)) == - incirc.get_Op_ptr_from_Vertex(v5)); - CHECK( - circ->get_Op_ptr_from_Vertex(sf1.slice->at(1)) == - incirc.get_Op_ptr_from_Vertex(v7)); - - CHECK( - sf1.quantum_in_edges->find(qb1)->second != - circ->get_nth_out_edge(v2, 1)); - CHECK( - sf1.quantum_in_edges->find(qb2)->second == - circ->get_nth_in_edge(sf1.slice->at(0), 1)); - - CHECK( - sf1.quantum_out_edges->find(qb0)->second != - circ->get_nth_in_edge(v6, 0)); - CHECK( - sf1.quantum_out_edges->find(qb3)->second == - circ->get_nth_out_edge(sf1.slice->at(1), 0)); - } - sf1.next_slicefrontier(); - REQUIRE(sf1.slice->empty()); - } - } -} - -SCENARIO( - "Check that an already solved routing problem will not add unecessary " - "swaps", - "[routing]") { - GIVEN("A solved problem") { - // Test Circuit, sequential cxs on a ring, requires no routing - Circuit test_circuit; - test_circuit.add_blank_wires(4); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); - - // Ring of size 4 - RingArch arc(4); - // Create Routing Object - Routing router(test_circuit, arc); - std::pair post_c = router.solve(); - REQUIRE(post_c.second == true); - REQUIRE(post_c.first.n_gates() == 4); - } - GIVEN("A solved problem supplied with map and custom architecture") { - Circuit test_circuit; - test_circuit.add_blank_wires(4); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {1, 2}, {2, 3}, {3, 0}}); - - Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 0}}); - Placement test_p(test_arc); - - qubit_mapping_t map_; - for (unsigned nn = 0; nn <= 3; ++nn) { - map_[Qubit(nn)] = Node(nn); - } - test_p.place_with_map(test_circuit, map_); - qubit_vector_t all_qs_post_place = test_circuit.all_qubits(); - Routing router(test_circuit, test_arc); - std::pair result = router.solve(); - qubit_vector_t all_qs_post_solve = test_circuit.all_qubits(); - - REQUIRE(all_qs_post_place == all_qs_post_solve); - REQUIRE(result.second == false); - REQUIRE(result.first.n_gates() == 4); - } -} - -SCENARIO( - "If a circuit has fewer qubits than the architecture has nodes, is a " - "correct sub-architecture made", - "[routing]") { - GIVEN("A circuit and architecture obeying said scenario") { - // 5 wires, all used - Circuit test_circuit(5); - add_2qb_gates(test_circuit, OpType::CX, {{0, 4}, {2, 3}, {1, 4}}); - - SquareGrid arc(3, 3); - Routing route(test_circuit, arc); - route.solve(); - node_vector_t nodes = route.get_active_nodes(); - - REQUIRE(nodes.size() == 5); - - // 5 wires, 4 used - Circuit test_circuit2(5); - add_2qb_gates(test_circuit2, OpType::CX, {{0, 3}, {1, 2}}); - - Routing route2(test_circuit2, arc); - route2.solve(); - node_vector_t nodes2 = route.get_active_nodes(); - - REQUIRE(nodes2.size() == 5); - } -} - -SCENARIO("Qubit activating edge case", "[routing]") { - GIVEN("A node line with only 3 
qubits line placed") { - Circuit circ; - circ.add_blank_wires(4); - add_2qb_gates( - circ, OpType::CX, {{1, 0}, {2, 0}, {2, 1}, {3, 0}, {3, 1}, {3, 2}}); - circ.add_op(OpType::CU1, 0.5, {1, 0}); - circ.add_op(OpType::CU1, 0.25, {2, 0}); - circ.add_op(OpType::CU1, 0.5, {2, 1}); - circ.add_op(OpType::CU1, 0.125, {3, 0}); - circ.add_op(OpType::CU1, 0.25, {3, 1}); - circ.add_op(OpType::CU1, 0.5, {3, 2}); - Transforms::rebase_tket().apply(circ); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair c = router.solve(); - REQUIRE(respects_connectivity_constraints(c.first, arc, false, true)); - REQUIRE(c.second); - } -} - -SCENARIO("Empty Circuit test", "[routing]") { - GIVEN("An Empty Circuit") { - Circuit circ; - circ.add_blank_wires(4); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.first.n_gates() == 0); - REQUIRE(result.second == true); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } -} - -SCENARIO("Routing on circuit with no multi-qubit gates", "[routing]") { - GIVEN("A circuit with no multi-qubit gates") { - Circuit circ; - circ.add_blank_wires(4); - add_1qb_gates(circ, OpType::X, {0, 2}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::Y, {1}); - // circ.add_op(OpType::Y,{3}); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(circ.n_vertices() - 8 == result.first.n_gates()); - REQUIRE(result.second == true); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } -} - -SCENARIO("Test routing for other multi-qubit ops", "[routing]") { - GIVEN("Failed qft circuit") { - Circuit circ(4, 4); - add_1qb_gates(circ, OpType::X, {0, 2}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::CU1, 0.5, {1, 0}); - circ.add_op(OpType::CU1, 0.5, {0, 1}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CU1, 0.25, {2, 0}); - circ.add_op(OpType::CU1, 0.5, {2, 1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::CU1, 0.125, {3, 0}); - circ.add_op(OpType::CU1, 0.25, {3, 1}); - circ.add_op(OpType::CU1, 0.5, {3, 2}); - circ.add_op(OpType::H, {3}); - for (unsigned nn = 0; nn <= 3; ++nn) { - circ.add_measure(nn, nn); - } - Transforms::rebase_tket().apply(circ); - Architecture arc({{0, 1}, {1, 2}, {2, 3}}); - Routing router(circ, arc); - std::pair result = router.solve(); - - REQUIRE(respects_connectivity_constraints(result.first, arc, false, true)); - REQUIRE(result.second); - } -} - -SCENARIO( - "Test routing on a directed architecture with bidirectional edges", - "[routing]") { - GIVEN("A simple two-qubit circuit") { - Circuit circ(2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::CX, {0, 1}); - Architecture arc({{0, 1}, {1, 0}}); - Architecture arc2(std::vector>{{0, 1}}); - - // routing ignored bi directional edge and solves correctly - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.first.n_gates() == 2); - CHECK(respects_connectivity_constraints(result.first, arc, false)); - REQUIRE(result.second == true); - } -} - -SCENARIO( - "Test routing on a directed architecture doesn't throw an error if " - "non-cx optype is presented", - "[routing]") { - GIVEN( - "A simple two-qubit circuit with non-cx multi-qubit gates and a " - "directed architecture") { - Circuit circ(2); - circ.add_op(OpType::CU1, 0.5, {1, 0}); - circ.add_op(OpType::CU1, 0.5, {0, 1}); - circ.add_op(OpType::CY, {1, 0}); - circ.add_op(OpType::CY, {0, 1}); - 
circ.add_op(OpType::CZ, {1, 0}); - circ.add_op(OpType::CZ, {0, 1}); - circ.add_op(OpType::CRz, 0.5, {1, 0}); - circ.add_op(OpType::CRz, 0.5, {0, 1}); - - Architecture arc(std::vector>{{0, 1}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second == true); - REQUIRE(result.first.n_gates() == 8); - } -} - -SCENARIO("Dense CX circuits route succesfully", "[routing]") { - GIVEN( - "Complex CX circuits for large directed architecture based off " - "IBMTokyo") { - Circuit circ(20); - for (unsigned x = 0; x < 17; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { // swap the way directed chain runs each time - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - Architecture arc( - {{0, 1}, {1, 2}, {2, 3}, {3, 4}, {0, 5}, {1, 6}, {1, 7}, - {2, 6}, {2, 7}, {3, 8}, {3, 9}, {4, 8}, {4, 9}, {5, 6}, - {5, 10}, {5, 11}, {6, 10}, {6, 11}, {6, 7}, {7, 12}, {7, 13}, - {7, 8}, {8, 12}, {8, 13}, {8, 9}, {10, 11}, {11, 16}, {11, 17}, - {11, 12}, {12, 16}, {12, 17}, {12, 13}, {13, 18}, {13, 19}, {13, 14}, - {14, 18}, {14, 19}, {15, 16}, {16, 17}, {17, 18}, {18, 19}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second); - (Transforms::decompose_SWAP_to_CX() >> Transforms::decompose_BRIDGE_to_CX()) - .apply(result.first); - Transforms::decompose_CX_directed(arc).apply(result.first); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } -} - -SCENARIO( - "Dense CX circuits route succesfully on undirected Ring with " - "placement.", - "[routing]") { - GIVEN("Complex CX circuits, big ring") { - Circuit circ(29); - for (unsigned x = 0; x < 29; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - RingArch arc(29); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second); - Transforms::decompose_SWAP_to_CX().apply(result.first); - REQUIRE(respects_connectivity_constraints(result.first, arc, false, true)); - } -} - -SCENARIO( - "Dense CX circuits route succesfully on smart placement unfriendly " - "architecture.", - "[routing]") { - GIVEN("Complex CX circuits, big ring") { - Circuit circ(13); - for (unsigned x = 0; x < 13; ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); - } else { - add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); - } - } - } - Architecture arc( - {{0, 1}, - {2, 0}, - {2, 4}, - {6, 4}, - {8, 6}, - {8, 10}, - {12, 10}, - {3, 1}, - {3, 5}, - {7, 5}, - {7, 9}, - {11, 9}, - {11, 13}, - {12, 13}, - {6, 7}}); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.second); - REQUIRE(respects_connectivity_constraints(result.first, arc, false, true)); - } -} - -SCENARIO("Empty circuits, with and without blank wires", "[routing]") { - GIVEN("An empty circuit with some qubits") { - Circuit circ(6); - RingArch arc(6); - Routing router(circ, arc); - std::pair result = router.solve(); - REQUIRE(result.first.depth() == 0); - REQUIRE(result.first.n_gates() == 0); - REQUIRE(result.first.n_qubits() == 6); - REQUIRE(result.second == true); - REQUIRE(respects_connectivity_constraints(result.first, arc, true)); - } - GIVEN("An empty circuit with no qubits") { - Circuit circ(0); - RingArch arc(6); - Routing router(circ, arc); - std::pair result = 
router.solve(); - REQUIRE(result.second == false); - REQUIRE(result.first.depth() == 0); - REQUIRE(result.first.n_gates() == 0); - REQUIRE(result.first.n_qubits() == 0); - } - - GIVEN("An empty circuit with no qubits, and empty architecture") { - Circuit circ(0); - std::vector> cons = {}; - Architecture arc(cons); - REQUIRE_THROWS_AS( - [&]() { Routing router(circ, arc); }(), ArchitectureMismatch); - } - GIVEN("An a mismatch") { - Circuit circ(5); - RingArch arc(4); - REQUIRE_THROWS_AS( - [&]() { Routing router(circ, arc); }(), ArchitectureMismatch); - } -} - -/* METHODS TO COVER IN TESTING: */ -/* Routing class: */ - -// Routing::increment_distance -SCENARIO("Does increment distance work?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - SquareGrid test_architecture(2, 3); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - ; - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("Suitable Distance vector, Swap and increment.") { - unsigned diameter = test_architecture.get_diameter(); - graphs::dist_vec test_distance(diameter, 2); - Swap test_swap = {square_nodes[0], square_nodes[1]}; - int increment = 2; - unsigned distance_index = diameter - test_architecture.get_distance( - test_swap.first, test_swap.second); - int pre_increment_val = test_distance[distance_index]; - routing_tester.increment_distance(test_distance, test_swap, increment); - REQUIRE(pre_increment_val + increment == test_distance[distance_index]); - } - GIVEN("Realistic Distance Vector, non_adjacent Swap, absurd increment.") { - unsigned diameter = test_architecture.get_diameter(); - graphs::dist_vec test_distance(diameter, 2); - Swap test_swap = {square_nodes[0], square_nodes[5]}; - int increment = 30; - unsigned distance_index = diameter - test_architecture.get_distance( - test_swap.first, test_swap.second); - int pre_increment_val = test_distance[distance_index]; - routing_tester.increment_distance(test_distance, test_swap, increment); - REQUIRE(pre_increment_val + increment == test_distance[distance_index]); - } -} - -// Routing::generate_distance_vector -SCENARIO("Does generate_distance_vector work suitably?", "[routing]") { - GIVEN("A realistic small interaction vector and architecture") { - // Creating RoutingTester object - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - - std::array inte_pattern = {1, 0, 5, 3, 4, 2}; - Interactions test_interaction; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - // no placement invoked, should be 0 at diameter distance, 1 at distance 2, - // 1 at distance 1. i.e. 
{0,2} - graphs::dist_vec out_distances = - routing_tester.generate_distance_vector(test_interaction); - REQUIRE( - out_distances[0] == - 0); // 0 entries at distance diameter away from each other - REQUIRE( - out_distances[1] == - 2); // 2 entries at distance diameter - 1 away from each other - } - GIVEN("A realistic large interaction vector and architecture") { - // Creating larger RoutingTester object - Circuit test_circuit(10); - SquareGrid test_architecture(2, 5); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 -- 2 -- 3 -- 4 - // | | | | | - // 5 -- 6 -- 7 -- 8 -- 9 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - // Expected distances: - // 9-0 -> 5 - // 8-1 -> 3 - // 7-2 -> 1 - // 6-3 -> 3 - // 5-4 -> 5 - // i.e. - graphs::dist_vec expected_distances = { - 4, 0, 4, 0}; // 4 qubits at diameter, 0 at diameter-1, 4 qubits at - // diameter-2, 0 at diameter-3 - graphs::dist_vec out_distances = - routing_tester.generate_distance_vector(test_interaction); - REQUIRE(out_distances == expected_distances); - } -} - -// Routing::update_distance_vector -SCENARIO("Does update_distance_vector update as intended?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - // update_distance_vector is four indiviudal increment_distances - GIVEN("Realistic Distance vector, Swap and Interaction vector.") { - unsigned diameter = test_architecture.get_diameter(); - graphs::dist_vec test_distance = {0, 2}; - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{1, 0, 5, 3, 4, 2}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - graphs::dist_vec quick_compare_distance = - routing_tester.generate_distance_vector(test_interaction); - REQUIRE(quick_compare_distance == test_distance); - - Swap test_swap = {square_nodes[2], square_nodes[4]}; - - // Distances from full method - graphs::dist_vec out_distance = routing_tester.update_distance_vector( - test_swap, test_distance, test_interaction); - // Forming Distances from individual steps: - // (1) 2 in test_swap is interacting with qubit 5, a distance of 2 away - // this swap brings the two qubits adjacent - unsigned distance_index_1 = - diameter - test_architecture.get_distance( - test_swap.first, test_interaction[test_swap.first]); - int pre_increment_val_1 = test_distance[distance_index_1]; - routing_tester.increment_distance( - test_distance, {test_swap.first, test_interaction[test_swap.first]}, - -2); - REQUIRE(pre_increment_val_1 - 2 == test_distance[distance_index_1]); - // (2), 4 in test_swap is not interacting, test_distances won't change - REQUIRE( - test_architecture.get_distance( - test_swap.second, test_interaction[test_swap.second]) == 0); - // (3), 4 in test_swap and the qubit 2 is interacting with 5 are adjacent, - // test_distances won't change - REQUIRE( - test_architecture.get_distance( - test_swap.second, 
test_interaction[test_swap.first]) == 1); - // (4), 2 in test swap and the qubit 4 is interacting with 0 are adjacent, - // test_distances won't change - REQUIRE( - test_architecture.get_distance( - test_swap.first, test_interaction[test_swap.second]) == 1); - - REQUIRE(out_distance[0] == test_distance[0]); - REQUIRE(out_distance[1] == test_distance[1]); - } -} -// Routing::pair_dists -SCENARIO( - "Does pair_dists return the correct distances, in the correct order?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN( - "Realistic architecture nodes. Distance between pair_1 less than " - "between pair_2.") { - std::pair pair_1 = {square_nodes[0], square_nodes[3]}; - std::pair pair_2 = {square_nodes[1], square_nodes[4]}; - unsigned dist_1 = - test_architecture.get_distance(pair_1.first, pair_1.second); - REQUIRE(dist_1 == 2); - unsigned dist_2 = - test_architecture.get_distance(pair_2.first, pair_2.second); - REQUIRE(dist_2 == 3); - std::pair pair_dists_results = - routing_tester.pair_dists( - pair_1.first, pair_1.second, pair_2.first, pair_2.second); - REQUIRE(pair_dists_results.first == dist_2); - REQUIRE(pair_dists_results.second == dist_1); - } - GIVEN( - "Realistic architecture nodes. Distance between pair_1 greater than " - "between pair_2.") { - std::pair pair_1 = {square_nodes[4], square_nodes[3]}; - std::pair pair_2 = {square_nodes[0], square_nodes[2]}; - unsigned dist_1 = - test_architecture.get_distance(pair_1.first, pair_1.second); - REQUIRE(dist_1 == 2); - unsigned dist_2 = - test_architecture.get_distance(pair_2.first, pair_2.second); - REQUIRE(dist_2 == 1); - std::pair pair_dists_results = - routing_tester.pair_dists( - pair_1.first, pair_1.second, pair_2.first, pair_2.second); - REQUIRE(pair_dists_results.first == dist_1); - REQUIRE(pair_dists_results.second == dist_2); - } -} - -// Routing::swap_decreases -SCENARIO( - "Does swap_decreases correctly determine between two placements?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("A swap that improves placement for given interaction vector.") { - // only nodes 0 and 5 have an interacting pair of qubits between them - Interactions test_interaction; - unsigned ind = 0; - std::array inte_pattern{5, 1, 2, 3, 4, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - Swap test_swap = {square_nodes[0], square_nodes[2]}; - Swap test_swap_interactions = {square_nodes[5], square_nodes[2]}; - // Confirm swap_decreases functions as expected - REQUIRE(routing_tester.swap_decreases(test_swap, test_interaction) == true); - // Confirm working elements of swap_decreases does also - unsigned dist_1 = test_architecture.get_distance( - test_swap.first, test_swap_interactions.first); - REQUIRE(dist_1 == 3); - unsigned dist_2 = test_architecture.get_distance( - test_swap.second, test_swap_interactions.second); - REQUIRE(dist_2 == 0); - unsigned dist_3 = 
test_architecture.get_distance( - test_swap.second, test_swap_interactions.first); - REQUIRE(dist_3 == 2); - unsigned dist_4 = test_architecture.get_distance( - test_swap.first, test_swap_interactions.second); - REQUIRE(dist_4 == 1); - - std::pair old_dists = {dist_1, dist_2}; - std::pair new_dists = {dist_3, dist_4}; - REQUIRE(new_dists < old_dists); - } - GIVEN("A swap containing non-interacting nodes.") { - unsigned ind = 0; - Interactions test_interaction; - // only nodes 0 and 5 have an interacting pair of qubits between them - std::array inte_pattern{5, 1, 2, 3, 4, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - Swap test_swap = {square_nodes[1], square_nodes[3]}; - Swap test_swap_interactions = {square_nodes[1], square_nodes[3]}; - // Confirm swap_decreases functions as expected - REQUIRE( - routing_tester.swap_decreases(test_swap, test_interaction) == false); - } -} - -// Routing::candidate_swaps -SCENARIO("Does candidate swaps return all suitable edges?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - std::vector test_arc = test_architecture.get_all_edges_vec(); - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("One pair of interacting qubits, four suitable edges between them.") { - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{3, 1, 2, 0, 4, 5}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector correct_swaps = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, - {square_nodes[2], square_nodes[3]}}; - std::vector test_swaps = - routing_tester.candidate_swaps(test_arc, test_interaction); - REQUIRE(test_swaps.size() == 4); - REQUIRE(test_swaps == correct_swaps); - } - GIVEN("A case wherein no edges are suitable.") { - unsigned ind = 0; - Interactions test_interaction; - // easiest to replicate this case by making all interactions adjacent - std::array inte_pattern{1, 0, 3, 2, 5, 4}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector test_swaps = - routing_tester.candidate_swaps(test_arc, test_interaction); - REQUIRE(test_swaps.size() == 0); - } - GIVEN( - "A case with all qubits interacting, 5 suitable edges between " - "them.") { - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{5, 2, 1, 4, 3, 0}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector correct_swaps = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[2], square_nodes[3]}, - {square_nodes[3], square_nodes[5]}, - {square_nodes[4], square_nodes[5]}}; - std::vector test_swaps = - routing_tester.candidate_swaps(test_arc, test_interaction); - REQUIRE(test_swaps.size() == 5); - REQUIRE(test_swaps == correct_swaps); - } -} - -// Routing::cowtan_et_al_heuristic -SCENARIO( - "Does implementation of heuristic outlined in paper work as expected?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - 
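(Aside: the distance vectors asserted in the scenarios above — for example the expected {4, 0, 4, 0} on the 2x5 grid — can be reproduced without any tket code. The sketch below is illustrative only: it assumes the same row-major grid labelling shown in the test comments and is not the library's implementation.)

#include <cassert>
#include <cstdlib>
#include <vector>

int main() {
  const unsigned rows = 2, cols = 5;
  const unsigned diameter = (rows - 1) + (cols - 1);  // == 5 for the 2x5 grid
  // Manhattan distance between nodes labelled row-major on the grid.
  auto dist = [&](unsigned a, unsigned b) {
    return unsigned(std::abs(int(a / cols) - int(b / cols)) +
                    std::abs(int(a % cols) - int(b % cols)));
  };
  // Interaction pattern from the test: qubit i interacts with qubit 9 - i.
  std::vector<unsigned> hist(diameter - 1, 0);  // buckets: diameter, diameter-1, ...
  for (unsigned i = 0; i < rows * cols; ++i) {
    unsigned d = dist(i, (rows * cols - 1) - i);
    if (d > 1) ++hist[diameter - d];  // adjacent pairs contribute nothing
  }
  assert((hist == std::vector<unsigned>{4, 0, 4, 0}));
  return 0;
}

Each interacting qubit contributes one count to the bucket for its current distance and adjacent pairs are not counted, which is why the vector has diameter - 1 entries (four here, and two for the 3x2 grid used elsewhere).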
SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - GIVEN("One pair of interacting qubits, four suitable swap gates.") { - std::vector test_swaps = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, - {square_nodes[2], square_nodes[3]}, - {square_nodes[3], square_nodes[5]}}; - graphs::dist_vec test_distances = {0, 2}; - Interactions test_interaction; - unsigned ind = 0; - std::array inte_pattern{3, 1, 2, 0, 4, 5}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - std::vector output_swaps = routing_tester.cowtan_et_al_heuristic( - test_swaps, test_distances, test_interaction); - std::vector expected_output = { - {square_nodes[0], square_nodes[1]}, - {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, - {square_nodes[2], square_nodes[3]}}; - REQUIRE(output_swaps == expected_output); - } - GIVEN("Two pairs of interacting qubits, two suitable swap gates.") { - std::vector test_swaps = { - {square_nodes[0], square_nodes[1]}, {square_nodes[0], square_nodes[2]}, - {square_nodes[1], square_nodes[3]}, {square_nodes[2], square_nodes[3]}, - {square_nodes[2], square_nodes[4]}, {square_nodes[3], square_nodes[5]}, - {square_nodes[4], square_nodes[5]}}; - unsigned ind = 0; - Interactions test_interaction; - std::array inte_pattern{3, 4, 2, 0, 1, 5}; - for (unsigned i = 0; i < inte_pattern.size(); ++i) { - test_interaction.insert({square_nodes[i], square_nodes[inte_pattern[i]]}); - } - graphs::dist_vec test_distances = {2, 2}; - std::vector output_swaps = routing_tester.cowtan_et_al_heuristic( - test_swaps, test_distances, test_interaction); - std::vector expected_output = { - {square_nodes[0], square_nodes[1]}, {square_nodes[1], square_nodes[3]}}; - REQUIRE(output_swaps == expected_output); - } -} - -// Routing::update_qmap -SCENARIO("Does update qmap correctly update mapping from swap?", "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(2); - RingArch test_architecture(2); - node_vector_t ring_nodes = test_architecture.get_all_nodes_vec(); - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - Qubit qb0(0); - Qubit qb1(1); - qubit_bimap_t test_map; - test_map.left.insert({qb0, ring_nodes[0]}); - test_map.left.insert({qb1, ring_nodes[1]}); - - routing_tester.update_qmap(test_map, {ring_nodes[0], ring_nodes[1]}); - REQUIRE(test_map.right.at(ring_nodes[0]) == qb1); - REQUIRE(test_map.right.at(ring_nodes[1]) == qb0); -} - -// Routing::solve_furthest interior functions -SCENARIO( - "Do solve_furthest interior methods find and swap along the expected " - "path?", - "[routing]") { - // Creating RoutingTester object - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - SquareGrid test_architecture(3, 2); - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - Routing test_router(test_circuit, test_architecture); - RoutingTester routing_tester(&test_router); - - unsigned node0, node1; - node_vector_t expected_path; - std::vector expected_swaps; - GIVEN("An expected path with an even number of nodes.") { - node0 = 0, node1 = 5; - expected_path = { - 
square_nodes[5], square_nodes[3], square_nodes[1], square_nodes[0]}; - expected_swaps = { - {square_nodes[5], square_nodes[3]}, {square_nodes[3], square_nodes[1]}}; - } - GIVEN("An expected path with an odd number of nodes.") { - node0 = 0, node1 = 3; - expected_path = {square_nodes[3], square_nodes[1], square_nodes[0]}; - expected_swaps = {{square_nodes[3], square_nodes[1]}}; - } - GIVEN("An adjacent path doesn't fail.") { - node0 = 0, node1 = 1; - expected_path = {square_nodes[1], square_nodes[0]}; - expected_swaps = {}; - } - // Collect path from architecture - const node_vector_t test_path = - test_architecture.get_path(square_nodes[node0], square_nodes[node1]); - REQUIRE(test_path == expected_path); - qubit_bimap_t test_map; - for (unsigned i = 0; i < 6; i++) { - test_map.left.insert({Qubit(i), square_nodes[i]}); - } - routing_tester.set_qmap(test_map); - const std::vector path_swaps = routing_tester.path_to_swaps(test_path); - REQUIRE(path_swaps == expected_swaps); -} - -// generate_test_interaction_graph and qubit_lines -SCENARIO("Test interaction graph and line generation", "[routing]") { - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - - GIVEN("A small test circuit with 1 layer, all qubits in 2qb gates.") { - Circuit test_circuit(6); - add_2qb_gates(test_circuit, OpType::CX, {{0, 1}, {2, 3}, {4, 5}}); - QubitGraph test_qubit_graph = generate_interaction_graph(test_circuit); - - REQUIRE(test_qubit_graph.n_connections() == 3); - REQUIRE(test_qubit_graph.edge_exists(Qubit(0), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(3))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(4), Qubit(5))); - - QubitLineList qlines = qubit_lines(test_circuit); - QubitLineList correct_lines = { - {Qubit(0), Qubit(1)}, {Qubit(2), Qubit(3)}, {Qubit(4), Qubit(5)}}; - REQUIRE(qlines == correct_lines); - } - GIVEN("A small test circuit with 1 layer, not all qubits in 2qb gates.") { - Circuit test_circuit(6); - test_circuit.add_op(OpType::CX, {0, 1}); - test_circuit.add_op(OpType::H, {5}); - test_circuit.add_op(OpType::H, {3}); - test_circuit.add_op(OpType::CX, {2, 4}); - - QubitGraph test_qubit_graph = generate_interaction_graph(test_circuit); - - REQUIRE(test_qubit_graph.n_connections() == 2); - REQUIRE(test_qubit_graph.edge_exists(Qubit(0), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(4))); - - QubitLineList qlines = qubit_lines(test_circuit); - QubitLineList correct_lines = { - {Qubit(0), Qubit(1)}, - {Qubit(2), Qubit(4)}, - {Qubit(3)}, - {Qubit(5)}}; // It is not guaranteed to match qubit numbers as qubits - // are not unsigneds - REQUIRE(qlines == correct_lines); - } - GIVEN("A small test circuit with 2 layers.") { - Circuit test_circuit(6); - add_2qb_gates( - test_circuit, OpType::CX, - {{0, 1}, {2, 3}, {4, 5}, {2, 1}, {4, 3}, {5, 1}}); - - QubitGraph test_qubit_graph = generate_interaction_graph(test_circuit); - - REQUIRE(test_qubit_graph.n_connections() == 5); - REQUIRE(test_qubit_graph.edge_exists(Qubit(0), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(3))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(4), Qubit(5))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(2), Qubit(1))); - REQUIRE(test_qubit_graph.edge_exists(Qubit(4), Qubit(3))); - - QubitLineList qlines = qubit_lines(test_circuit); - QubitLineList correct_lines = { - {Qubit(0), Qubit(1), Qubit(2), Qubit(3), Qubit(4), Qubit(5)}}; - REQUIRE(qlines == correct_lines); - } -} - -// solve_with_map -SCENARIO("Test routing with partial map provided", "[routing]") { - GIVEN("A 
partial map where no node should be removed.") { - Circuit circ(3); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::CX, {0, 2}); - Architecture arc({{0, 1}, {1, 2}}); - - // force a partial map which requires unused node to solve - Placement pl(arc); - qubit_mapping_t map_ = {{Qubit(0), Node(0)}, {Qubit(2), Node(2)}}; - pl.place_with_map(circ, map_); - Routing router(circ, arc); - std::pair result = router.solve(); - - REQUIRE(result.second == true); - // check solution is valid and respects map - std::vector test_coms = result.first.get_commands(); - REQUIRE(test_coms.size() == 3); - bool oph = (*test_coms[0].get_op_ptr() == *get_op_ptr(OpType::H)); - oph &= (test_coms[0].get_args()[0] == Node(0)); - REQUIRE(oph); - REQUIRE(*test_coms[1].get_op_ptr() == *get_op_ptr(OpType::SWAP)); - REQUIRE(*test_coms[2].get_op_ptr() == *get_op_ptr(OpType::CX)); - } - - GIVEN("A mapped set of nodes") { - Circuit circ(4); - Qubit qb0(0); - Qubit qb1(1); - Qubit qb2(2); - Qubit qb3(3); - // test removal only happpens if subgraph remains connected - SquareGrid test_architecture(3, 2); - Architecture subarc = test_architecture; - node_vector_t square_nodes = test_architecture.get_all_nodes_vec(); - // 0 -- 1 - // | | - // 2 -- 3 - // | | - // 4 -- 5 - // subarc = {0, 1, 3} - subarc.remove_node(square_nodes[5]); - subarc.remove_node(square_nodes[4]); - subarc.remove_node(square_nodes[3]); - REQUIRE(subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[3])); - REQUIRE(!subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[1])); - REQUIRE(subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[4])); - REQUIRE(subgraph_remove_if_connected( - test_architecture, subarc, square_nodes[5])); - REQUIRE(test_architecture.n_connections() == 2); - - SquareGrid test_architecture2(3, 2); - qubit_bimap_t map; - map.left.insert({qb0, square_nodes[0]}); - map.left.insert({qb1, square_nodes[1]}); - map.left.insert({qb2, square_nodes[2]}); - - remove_unmapped_nodes(test_architecture2, map, circ); - REQUIRE(test_architecture2.n_connections() == 2); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[1])); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[2])); - - // test unmapped nodes which cannot be removed are mapped to a qubit - map.left.erase(qb0); - - remove_unmapped_nodes(test_architecture2, map, circ); - REQUIRE(map.left.find(qb0)->second == square_nodes[0]); - REQUIRE(test_architecture2.n_connections() == 2); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[1])); - REQUIRE(test_architecture2.edge_exists(square_nodes[0], square_nodes[2])); - - // test when an unmapped node is mapped, the most connected is chosen - // (i.e. least connected nodes are removed first) - Architecture test_architecture3({{0, 1}, {0, 2}, {1, 3}, {2, 3}, {2, 4}}); - // 0 -- 1 - // | | - // 2 -- 3 - // | - // 4 - - qubit_bimap_t map2; - map2.left.insert({qb0, Node(0)}); - map2.left.insert({qb3, Node(3)}); - remove_unmapped_nodes(test_architecture3, map2, circ); - REQUIRE(map2.right.find(Node(2))->second == qb1); - bool no_4 = map2.right.find(Node(4)) == map2.right.end(); - REQUIRE(no_4); - } -} - -// Every command in the circuit with a specified optype -// must have a specified single qubit argument. 
-static void require_arguments_for_specified_commands( - const Circuit &circ, const std::map &the_map) { - for (Command com : circ) { - const auto type = com.get_op_ptr()->get_type(); - const auto citer = the_map.find(type); - if (citer != the_map.cend()) { - unit_vector_t comp = {citer->second}; - REQUIRE(com.get_args() == comp); - } - } -} - -SCENARIO( - "Does shifting single qubit gates through SWAP gates to get find nodes " - "with better fidelity work?", - "[routing]") { - Architecture arc({{0, 1}, {1, 2}}); - gate_error_t ge_0(0.3); - gate_error_t ge_1(0.2); - gate_error_t ge_2(0.1); - op_node_errors_t nec; - op_errors_t gec_0({{OpType::H, ge_0}, {OpType::X, ge_1}}); - op_errors_t gec_1({{OpType::H, ge_1}, {OpType::X, ge_2}}); - op_errors_t gec_2({{OpType::H, ge_2}, {OpType::X, ge_0}}); - - nec.insert({Node(2), gec_2}); - nec.insert({Node(0), gec_0}); - nec.insert({Node(1), gec_1}); - - GIVEN( - "A simple two qubit circuit with clear difference between node " - "fidelities") { - Circuit circ(2); - add_1qb_gates(circ, OpType::H, {0, 1}); - circ.add_op(OpType::SWAP, {0, 1}); - reassign_boundary(circ); - Transforms::commute_SQ_gates_through_SWAPS(nec).apply(circ); - require_arguments_for_specified_commands( - circ, {{OpType::H, circ.all_qubits().at(1)}}); - } - GIVEN( - "A simple two qubit circuit with multiple single qubit operations " - "requiring movement before a SWAP.") { - Circuit circ(2); - add_1qb_gates(circ, OpType::H, {0, 0, 0, 1}); - circ.add_op(OpType::SWAP, {0, 1}); - reassign_boundary(circ); - Transforms::commute_SQ_gates_through_SWAPS(nec).apply(circ); - require_arguments_for_specified_commands( - circ, {{OpType::H, circ.all_qubits().at(1)}}); - } - GIVEN("Multiple SWAP gates, multiple single qubit gates.") { - Circuit circ(3); - add_1qb_gates(circ, OpType::H, {0, 0, 0, 1}); - add_2qb_gates(circ, OpType::SWAP, {{0, 1}, {1, 2}, {0, 1}, {1, 2}, {1, 2}}); - - reassign_boundary(circ); - Transforms::commute_SQ_gates_through_SWAPS(nec).apply(circ); - require_arguments_for_specified_commands( - circ, {{OpType::H, circ.all_qubits().at(2)}}); - } - GIVEN( - "Multiple SWAP gates, multiple single qubit gates of various " - "OpType.") { - Circuit circ(3); - add_1qb_gates(circ, OpType::X, {0, 0, 1, 1}); - add_1qb_gates(circ, OpType::H, {0, 0, 0, 1}); - add_2qb_gates(circ, OpType::SWAP, {{0, 1}, {1, 2}, {0, 1}, {1, 2}, {1, 2}}); - - reassign_boundary(circ); - Transforms::commute_SQ_gates_through_SWAPS(nec).apply(circ); - const qubit_vector_t qbs = circ.all_qubits(); - require_arguments_for_specified_commands( - circ, {{OpType::H, qbs.at(2)}, {OpType::X, qbs.at(1)}}); - } - GIVEN( - "A large circuit of CX gates, H gates and X gates, routed and " - "shifted.") { - Circuit circ(9); - for (unsigned x = 0; x < circ.n_qubits(); ++x) { - for (unsigned y = 0; y + 1 < x; ++y) { - if (x % 2) { - circ.add_op(OpType::SWAP, {x, y}); - circ.add_op(OpType::X, {x}); - circ.add_op(OpType::H, {x}); - circ.add_op(OpType::SWAP, {y + 1, y}); - } else { - circ.add_op(OpType::SWAP, {y, x}); - circ.add_op(OpType::H, {y}); - circ.add_op(OpType::X, {y}); - circ.add_op(OpType::SWAP, {y, y + 1}); - } - } - } - SquareGrid arc(3, 3); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - - const std::vector gate_errors{ - 0.3, 0.2, 0.1, 0.02, 0.22, 0.46, 0.18, 1.0 - 0.907, 1.0 - 0.7241}; - REQUIRE(arc.get_columns() * arc.get_rows() == gate_errors.size()); - REQUIRE(gate_errors.size() == square_nodes.size()); - REQUIRE(circ.n_qubits() == gate_errors.size()); - op_node_errors_t nec; - unsigned ind = 0; - for 
(unsigned nn = 0; nn < square_nodes.size(); ++nn) { - nec[square_nodes[nn]] = op_errors_t( - {{OpType::H, gate_errors[nn]}, - {OpType::X, gate_errors[(nn + 3) % gate_errors.size()]}}); - } - DeviceCharacterisation characterisation(nec); - - Circuit test_0 = circ; - reassign_boundary(test_0, square_nodes); - Transforms::decompose_SWAP_to_CX().apply(test_0); - const auto sv0 = tket_sim::get_statevector(test_0); - double pre_aggregate = 0; - - qubit_bimap_t qmap; - qubit_vector_t free_qs = test_0.all_qubits(); - for (unsigned u = 0; u < free_qs.size(); u++) { - qmap.insert({free_qs[u], square_nodes[u]}); - } - - for (Command com : test_0) { - OpType ot = com.get_op_ptr()->get_type(); - if (ot == OpType::X || ot == OpType::H) { - Node n = qmap.left.at(Qubit(com.get_args()[0])); - pre_aggregate += 1.0 - characterisation.get_error(Node(n), ot); - } - } - reassign_boundary(circ, square_nodes); - Transforms::commute_SQ_gates_through_SWAPS(nec).apply(circ); - Circuit test_1 = circ; - Transforms::decompose_SWAP_to_CX().apply(test_1); - const auto sv1 = tket_sim::get_statevector(test_1); - double post_aggregate = 0; - for (Command com : test_1) { - OpType ot = com.get_op_ptr()->get_type(); - if (ot == OpType::X || ot == OpType::H) { - Node n = qmap.left.at(Qubit(com.get_args()[0])); - post_aggregate += 1.0 - characterisation.get_error(Node(n), ot); - } - } - REQUIRE(tket_sim::compare_statevectors_or_unitaries(sv0, sv1)); - REQUIRE(post_aggregate > pre_aggregate); - } -} -SCENARIO("Test barrier is ignored by routing") { - GIVEN("Circuit with 1qb barrier") { - Circuit circ(3); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.3, {0}); - circ.add_barrier(uvec{0}); - circ.add_op(OpType::CX, {1, 2}); - SquareGrid test_architecture(1, 3); - GraphPlacement gp(test_architecture); - gp.place(circ); - Routing router(circ, test_architecture); - Circuit pc = router.solve().first; - REQUIRE(pc.depth() == 2); - check_command_types( - pc, {OpType::CX, OpType::Rz, OpType::CX, OpType::Barrier}); - } - GIVEN("Circuit with 2 qb barrier") { - Circuit circ(2); - circ.add_op(OpType::CX, {1, 0}); - circ.add_barrier({0, 1}); - circ.add_op(OpType::CX, {0, 1}); - SquareGrid test_architecture(1, 2); - Routing router(circ, test_architecture); - check_command_types( - router.solve().first, {OpType::CX, OpType::Barrier, OpType::CX}); - } - GIVEN("Circuit with 4 qb barrier, using gen_full_mapping_pass.") { - const std::vector nums = { - Node("rig", 21), Node("rig", 22), Node("rig", 25), Node("rig", 35), - Node("rig", 36)}; - const std::vector> coupling_list_indices = { - {0, 1}, {0, 4}, {1, 0}, {1, 3}, {4, 0}, {4, 3}, {3, 1}, {3, 4}}; - - std::vector> coupling_list; - for (const auto &pair : coupling_list_indices) { - coupling_list.push_back( - std::make_pair(nums[pair.first], nums[pair.second])); - } - Architecture arc(coupling_list); - Circuit circ(4); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - add_2qb_gates(circ, OpType::CZ, {{1, 2}, {3, 2}, {3, 1}}); - circ.add_barrier({0, 1, 2, 3}); - - RoutingConfig config = {}; - PlacementPtr pp = std::make_shared(arc); - PassPtr p = gen_full_mapping_pass(arc, pp, config); - CompilationUnit cu(circ); - p->apply(cu); - REQUIRE( - respects_connectivity_constraints(cu.get_circ_ref(), arc, false, true)); - } - GIVEN("Check Circuit with 2qb barrier does not add swaps for the barrier") { - Circuit circ(3); - Architecture line({{0, 1}, {1, 2}}); - GraphPlacement gp(line); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - circ.add_barrier({0, 2}); - add_2qb_gates(circ, 
OpType::CX, {{0, 1}, {1, 2}}); - gp.place(circ); - - Routing router(circ, line); - qubit_vector_t all_qs_pre = circ.all_qubits(); - std::pair pc = router.solve(); - qubit_vector_t all_qs_post = circ.all_qubits(); - REQUIRE(all_qs_pre == all_qs_post); - REQUIRE(pc.second == false); - REQUIRE(pc.first.depth() == 4); - } - GIVEN( - "Check Circuit with 2qb barrier does not add swaps for the barrier, " - "with no placement.") { - Circuit circ(3); - Architecture line({{0, 1}, {1, 2}}); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - circ.add_barrier({0, 2}); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - GraphPlacement gp(line); - gp.place(circ); - Routing router(circ, line); - unsigned pre_depth = circ.depth(); - std::pair pc = router.solve({}); - REQUIRE(pc.second == false); - unsigned post_depth = pc.first.depth(); - REQUIRE(post_depth == pre_depth); - REQUIRE(post_depth == 4); - } - GIVEN("Circuit with 3 qb barrier") { - Circuit circ(3); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}}); - circ.add_barrier({0, 1, 2}); - Architecture line({{0, 1}, {1, 2}}); - GraphPlacement gp(line); - gp.place(circ); - Routing router(circ, line); - qubit_vector_t all_qs_pre = circ.all_qubits(); - std::pair pc = router.solve(); - qubit_vector_t all_qs_post = circ.all_qubits(); - REQUIRE(all_qs_pre == all_qs_post); - REQUIRE(pc.first.depth() == 2); - REQUIRE(pc.second == false); - } -} - -SCENARIO( - "Does identification and insertion of bridge circuit do as expected?") { - GIVEN( - "A proposed SWAP which will act detrimentally for the next timestep, " - "i.e. a bridge should be inserted.") { - Circuit circ(9); - for (unsigned i = 0; i < 9; i++) { - circ.add_op(OpType::H, {i}); - } - add_2qb_gates(circ, OpType::CX, {{0, 4}, {3, 8}, {4, 7}, {3, 6}}); - - SquareGrid arc(3, 3); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - Routing router(circ, arc); - RoutingTester test_router(&router); - RoutingConfig new_config(50, 0, 0, 0); - test_router.set_config(new_config); - qubit_bimap_t qmap = test_router.set_default_initial_map(square_nodes); - test_router.initialise_slicefrontier(); - test_router.set_interaction(); - std::pair, std::pair> output = - test_router.check_distributed_cx({square_nodes[1], square_nodes[4]}); - std::pair, std::pair> expected = { - {false, Node(0)}, {false, Node(0)}}; - REQUIRE(output == expected); - } - GIVEN( - "A proposed SWAP which will act better for the next timestep, i.e. 
a " - "bridge should not be inserted.") { - Circuit circ(9); - for (unsigned i = 0; i < 9; i++) { - circ.add_op(OpType::H, {i}); - } - add_2qb_gates(circ, OpType::CX, {{0, 4}, {3, 8}, {4, 7}, {3, 6}}); - - SquareGrid arc(3, 3); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - Routing router(circ, arc); - RoutingTester test_router(&router); - qubit_bimap_t qmap = test_router.set_default_initial_map(square_nodes); - RoutingConfig new_config(50, 0, 0, 0); - test_router.set_config(new_config); - test_router.initialise_slicefrontier(); - test_router.set_interaction(); - std::pair, std::pair> output = - test_router.check_distributed_cx({square_nodes[3], square_nodes[4]}); - std::pair, std::pair> expected{ - {false, Node(0)}, {false, Node(0)}}; - REQUIRE(output == expected); - } - GIVEN("Multiple bridges to be inserted.") { - Circuit circ(6); - SquareGrid arc(6, 1); - add_2qb_gates(circ, OpType::CX, {{0, 2}, {3, 5}, {1, 3}}); - node_vector_t square_nodes = arc.get_all_nodes_vec(); - Routing router(circ, arc); - RoutingTester test_router(&router); - qubit_bimap_t qmap = test_router.set_default_initial_map(square_nodes); - test_router.initialise_slicefrontier(); - test_router.set_interaction(); - test_router.add_distributed_cx( - square_nodes[3], square_nodes[5], square_nodes[4]); - test_router.add_distributed_cx( - square_nodes[0], square_nodes[2], square_nodes[1]); - REQUIRE(test_router.get_circ()->n_gates() == 3); - test_router.advance_frontier(); - } - GIVEN("Consecutive CX edge case") { - Circuit circ(5); - Architecture arc({{0, 1}, {1, 2}, {0, 3}, {1, 4}, {3, 4}}); - add_2qb_gates(circ, OpType::CX, {{1, 2}, {0, 2}, {0, 1}}); - Routing router(circ, arc); - RoutingTester test_router(&router); - qubit_bimap_t qmap = test_router.set_default_initial_map(); - test_router.initialise_slicefrontier(); - test_router.advance_frontier(); - test_router.set_interaction(); - test_router.add_distributed_cx(Node(0), Node(2), Node(1)); - test_router.advance_frontier(); - } -} - -SCENARIO( - "Do Placement and Routing work if the given graph perfectly solves the " - "problem?") { - GIVEN("A perfect example without clifford_simp") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{1, 2}, - {0, 3}, - {1, 4}, - {1, 2}, - {0, 1}, - {2, 0}, - {2, 1}, - {0, 1}, - {2, 0}, - {1, 4}, - {1, 3}, - {1, 0}}); - - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - Placement pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - std::pair out_circ = router.solve(default_config); - REQUIRE( - respects_connectivity_constraints(out_circ.first, arc, false) == true); - REQUIRE(out_circ.second); - } - GIVEN( - "The circuit left after clifford_simp, without clifford simp " - "applied") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - Placement 
pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - std::pair out_circ = router.solve(default_config); - REQUIRE( - respects_connectivity_constraints(out_circ.first, arc, false) == true); - REQUIRE(out_circ.second); - } - GIVEN( - "A smaller circuit that once had a segmentation fault when iterating " - "through commands after clifford_simp() is applied and routing " - "completed.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{1, 2}, {0, 3}, {1, 4}, {0, 1}, {2, 0}, {0, 1}, {1, 0}}); - Transforms::clifford_simp().apply(circ); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - - Placement pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - Circuit out_circ = router.solve(default_config).first; - REQUIRE(respects_connectivity_constraints(out_circ, arc, false) == true); - } - GIVEN("The circuit that dies with clifford_simp") { - Circuit circ(5); - add_2qb_gates(circ, OpType::CX, {{0, 3}, {1, 4}, {1, 0}, {2, 1}}); - circ.add_op(OpType::SWAP, {3, 4}); - circ.add_op(OpType::Z, {4}); - circ.replace_SWAPs(); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - GraphPlacement pl(arc); - qubit_mapping_t pl_map = pl.get_placement_map(circ); - pl.place(circ); - Routing router(circ, arc); - Circuit out_circ = router.solve(default_config).first; - qubit_mapping_t map = router.return_final_map(); - Vertex x = out_circ.add_op(OpType::X, {map.at(pl_map.at(Qubit(4)))}); - Vertex pred = out_circ.get_predecessors(x).front(); - REQUIRE(out_circ.get_OpType_from_Vertex(pred) == OpType::Z); - REQUIRE(NoWireSwapsPredicate().verify(out_circ)); - REQUIRE(respects_connectivity_constraints(out_circ, arc, false) == true); - } -} - -SCENARIO( - "Does the decompose_BRIDGE_gates function correctly decompose the " - "BRIDGE Op, and pick the correct decomposition given the structure of " - "surrounding CX gates?") { - GIVEN("A single BRIDGE gate to be decomposed.") { - Architecture test_arc({{0, 1}, {1, 2}}); - Circuit test_pc(3); - test_pc.add_op(OpType::BRIDGE, {0, 1, 2}); - Transforms::decompose_BRIDGE_to_CX().apply(test_pc); - auto it = test_pc.begin(); - unit_vector_t opt1 = {Qubit(0), Qubit(1)}; - unit_vector_t opt2 = {Qubit(1), Qubit(2)}; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt2); - ++it; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt1); - ++it; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt2); - ++it; - CHECK(it->get_op_ptr()->get_type() == OpType::CX); - CHECK(it->get_args() == opt1); - } - GIVEN("MultpleBRIDGE gate to be decomposed.") { - Architecture test_arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}}); - Circuit test_circuit(6); - test_circuit.add_op(OpType::BRIDGE, {0, 1, 2}); - test_circuit.add_op(OpType::BRIDGE, {1, 2, 3}); - test_circuit.add_op(OpType::BRIDGE, {2, 1, 0}); - test_circuit.add_op(OpType::BRIDGE, {2, 3, 4}); - test_circuit.add_op(OpType::BRIDGE, {3, 4, 5}); - Circuit test_pc(test_circuit); - Transforms::decompose_BRIDGE_to_CX().apply(test_pc); - REQUIRE(test_pc.n_gates() == 20); - } -} - -SCENARIO("Does the rerouting of a solved circuit return 'false'?") { - 
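(Aside: the four-CX pattern checked in the BRIDGE decomposition scenario above is easy to verify by hand, since every gate involved only permutes computational basis states. A minimal standalone check, not using tket:)

#include <cassert>

int main() {
  // Qubits 0 (control), 1 (middle), 2 (target); a, b, c are their basis values.
  for (unsigned a = 0; a < 2; ++a)
    for (unsigned b0 = 0; b0 < 2; ++b0)
      for (unsigned c0 = 0; c0 < 2; ++c0) {
        unsigned b = b0, c = c0;
        c ^= b;  // CX(1,2)
        b ^= a;  // CX(0,1)
        c ^= b;  // CX(1,2)
        b ^= a;  // CX(0,1)
        assert(b == b0);        // middle qubit is restored
        assert(c == (c0 ^ a));  // target flipped iff the far control is 1
      }
  return 0;
}

The same identity holds for the reversed ordering CX(0,1); CX(1,2); CX(0,1); CX(1,2), and four CX gates per BRIDGE is what makes the five-BRIDGE case above decompose into exactly 20 gates.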
GIVEN("A simple circuit using default solve.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - Routing router(circ, arc); - std::pair out_circ = router.solve(); - REQUIRE(out_circ.second == true); - Routing router2(out_circ.first, arc); - std::pair test_out2 = router2.solve(); - REQUIRE(test_out2.second == false); - Routing router3(test_out2.first, arc); - std::pair test_out3 = router3.solve(); - REQUIRE(test_out3.second == false); - } - GIVEN("A simple circuit, but using a custom map for finding a solution.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - RoutingConfig default_config(50, 0, 0, 0); - QubitGraph q_graph = - monomorph_interaction_graph(circ, arc.n_connections(), 5); - std::vector potential_maps = - monomorphism_edge_break(arc, q_graph, 10000, 60000); - qubit_mapping_t init_map = bimap_to_map(potential_maps[0].left); - Placement pl(arc); - pl.place_with_map(circ, init_map); - Routing router(circ, arc); - std::pair out_circ = router.solve(default_config); - REQUIRE( - respects_connectivity_constraints(out_circ.first, arc, false) == true); - REQUIRE(out_circ.second); - - // Now try repeating it, making sure returned bool changes - // make a LinePlacement plaer for the architecture - LinePlacement lp_d(arc); - - Circuit c0 = out_circ.first; - qubit_mapping_t m_0 = lp_d.get_placement_map(c0); - lp_d.place_with_map(c0, m_0); - Routing router2(c0, arc); - std::pair test_out2 = router2.solve(); - - Circuit c1 = test_out2.first; - REQUIRE(test_out2.second == true); - Routing router3(c1, arc); - qubit_vector_t pre_c1 = c1.all_qubits(); - std::pair test_out3 = router3.solve(); - qubit_vector_t post_c1 = test_out3.first.all_qubits(); - REQUIRE(test_out3.second == false); - Circuit c2 = test_out3.first; - Routing router4(c2, arc); - std::pair test_out4 = router4.solve(); - REQUIRE(test_out4.second == false); - } -} -SCENARIO("Routing on architecture with non-contiguous qubit labels") { - GIVEN("A 2-qubit architecture with a gap") { - Architecture arc(std::vector>{{0, 2}}); - PassPtr pass = gen_default_mapping_pass(arc); - Circuit circ(2); - CompilationUnit cu(circ); - pass->apply(cu); - } - GIVEN("A 2-qubit architecture with a gap and some two-qubit gates") { - Architecture arc(std::vector>{{0, 2}}); - PassPtr pass = gen_default_mapping_pass(arc); - Circuit circ(2); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CZ, {1, 0}); - circ.add_op(OpType::SWAP, {0, 1}); - CompilationUnit cu(circ); - pass->apply(cu); - } -} - -SCENARIO("Routing of aas example") { - GIVEN("aas routing - simple example") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - 
circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example II") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example III") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example IV") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - 
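(Aside: each CX-ladder / Rz / mirrored-ladder block in these aas examples is a phase gadget: the ladder accumulates the parity of all four qubits onto the last wire, the Rz just added phases by that parity, and the mirrored ladder that follows uncomputes it. A standalone sketch of the classical bookkeeping, not tket code:)

#include <cassert>

int main() {
  for (unsigned x = 0; x < 16; ++x) {
    unsigned a = x & 1, b = (x >> 1) & 1, c = (x >> 2) & 1, d = (x >> 3) & 1;
    unsigned b1 = b ^ a;   // CX(0,1)
    unsigned c1 = c ^ b1;  // CX(1,2)
    unsigned d1 = d ^ c1;  // CX(2,3): last wire now holds a^b^c^d, which the Rz sees
    assert(d1 == (a ^ b ^ c ^ d));
    unsigned d2 = d1 ^ c1;  // CX(2,3)
    unsigned c2 = c1 ^ b1;  // CX(1,2)
    unsigned b2 = b1 ^ a;   // CX(0,1): all inputs restored
    assert(b2 == b && c2 == c && d2 == d);
  }
  return 0;
}

This is the structure the phase-polynomial (aas) pass resynthesises, which is why the unitary comparison against the original circuit is the right correctness check in these scenarios.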
circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.3, {3}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example V") { - Architecture arc(std::vector{{Node(0), Node(1)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example VI") { - Architecture arc(std::vector{{Node(0), Node(2)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - - const auto s = tket_sim::get_unitary(circ); - const auto s1 = tket_sim::get_unitary(result); - REQUIRE(tket_sim::compare_statevectors_or_unitaries( - s, s1, tket_sim::MatrixEquivalence::EQUAL)); - } - GIVEN("aas routing - simple example VII") { - Architecture arc(std::vector{ - {Node(0), Node(2)}, {Node(2), Node(4)}, {Node(4), Node(6)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - - const auto s = tket_sim::get_unitary(circ); - const auto s1 = tket_sim::get_unitary(result); - REQUIRE(tket_sim::compare_statevectors_or_unitaries( - s, s1, tket_sim::MatrixEquivalence::EQUAL)); - } - GIVEN("aas routing - simple example VIII") { - Architecture arc(std::vector{ - {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - 
circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::H, {2}); - circ.add_op(OpType::H, {3}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - simple example IX, other gate set") { - Architecture arc(std::vector{ - {Node(1000), Node(10)}, {Node(10), Node(100)}, {Node(100), Node(1)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(4); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::X, {3}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::CX, {2, 3}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::Rz, 0.1, {2}); - circ.add_op(OpType::Rz, 0.1, {3}); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::X, {3}); - - CompilationUnit cu(circ); - - REQUIRE(pass->apply(cu)); - - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing with measure") { - Architecture arc(std::vector{{Node(0), Node(2)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(2, 2); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.1, {1}); - circ.add_op(OpType::H, {0}); - circ.add_op(OpType::H, {1}); - for (unsigned mes = 0; mes < 2; ++mes) { - circ.add_measure(mes, mes); - } - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - } - GIVEN("aas routing - circuit with fewer qubits then nodes in the arch") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, {Node(1), Node(2)}, {Node(2), Node(3)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(3); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.2, {1}); - circ.add_op(OpType::Rz, 0.3, {2}); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } - GIVEN("aas routing - circuit with fewer qubits then nodes in the arch II") { - Architecture arc(std::vector{ - {Node(0), Node(1)}, - {Node(1), Node(2)}, - {Node(2), Node(3)}, - {Node(3), Node(4)}}); - PassPtr pass = gen_full_mapping_pass_phase_poly(arc); - Circuit circ(3); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::CX, {1, 2}); - circ.add_op(OpType::Rz, 0.1, {0}); - circ.add_op(OpType::Rz, 0.2, {1}); - circ.add_op(OpType::Rz, 0.3, {2}); - circ.add_op(OpType::X, {0}); - circ.add_op(OpType::X, {1}); - circ.add_op(OpType::X, {2}); - - CompilationUnit cu(circ); - REQUIRE(pass->apply(cu)); - Circuit result = cu.get_circ_ref(); - - REQUIRE(test_unitary_comparison(circ, result)); - } -} - -SCENARIO("Routing preserves the number of qubits") { - std::vector> cons; - cons.push_back({Node("x", 1), Node("x", 0)}); - cons.push_back({Node("x", 2), Node("x", 1)}); - Architecture arc( - std::vector>(cons.begin(), cons.end())); 
- PassPtr pass = gen_default_mapping_pass(arc); - Circuit c(3); - c.add_op(OpType::CnX, {2, 1}); - CompilationUnit cu(c); - bool applied = pass->apply(cu); - const Circuit &c1 = cu.get_circ_ref(); - REQUIRE(c.n_qubits() == c1.n_qubits()); -} - -SCENARIO( - "Methods related to correct routing and decomposition of circuits with " - "classical wires.") { - GIVEN( - "A circuit with classical wires on CX gates. No Bridge gate " - "allowed.") { - Architecture test_arc({{0, 1}, {1, 2}}); - Circuit circ(3, 2); - circ.add_op(OpType::CX, {0, 1}); - circ.add_op(OpType::H, {0}); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0, 1}, 0); - circ.add_conditional_gate(OpType::CX, {}, {2, 1}, {0, 1}, 1); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0, 1}, 2); - circ.add_conditional_gate(OpType::CX, {}, {2, 1}, {1, 0}, 3); - circ.add_conditional_gate(OpType::CX, {}, {0, 2}, {0, 1}, 0); - Routing test_router(circ, test_arc); - std::pair output = test_router.solve({50, 0, 0, 0}); - Transforms::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints( - output.first, test_arc, false, false)); - Transforms::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints( - output.first, test_arc, false, false)); - } - GIVEN( - "A circuit that requires modification to satisfy architecture " - "constraints.") { - Architecture sg({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); - Circuit circ(5, 1); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {0}, 1); - add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}, {1, 3}, {1, 4}, {0, 1}}); - Routing test_router(circ, sg); - std::pair output = test_router.solve({50, 0, 0, 0}); - Transforms::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, sg, false, false)); - Transforms::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, sg, false, false)); - Command classical_com = output.first.get_commands()[0]; - REQUIRE(classical_com.get_args()[0] == output.first.all_bits()[0]); - } - GIVEN("A single Bridge gate with multiple classical wires, decomposed.") { - Architecture arc({{0, 1}, {1, 2}}); - Circuit circ(3, 3); - circ.add_conditional_gate( - OpType::BRIDGE, {}, {0, 1, 2}, {0, 1, 2}, 1); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - Transforms::decompose_BRIDGE_to_CX().apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); - for (Command com : circ.get_commands()) { - REQUIRE(com.get_args()[0] == circ.all_bits()[0]); - REQUIRE(com.get_args()[1] == circ.all_bits()[1]); - REQUIRE(com.get_args()[2] == circ.all_bits()[2]); - } - } - GIVEN("A directed architecture, a single CX gate that requires flipping.") { - Architecture arc(std::vector>{{0, 1}}); - Circuit circ(2, 2); - circ.add_conditional_gate(OpType::CX, {}, {0, 1}, {1, 0}, 0); - circ.add_conditional_gate(OpType::CX, {}, {1, 0}, {0, 1}, 1); - reassign_boundary(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, false, false)); - REQUIRE(!respects_connectivity_constraints(circ, arc, true, false)); - Transforms::decompose_CX_directed(arc).apply(circ); - REQUIRE(respects_connectivity_constraints(circ, arc, true, false)); - std::vector all_coms = circ.get_commands(); - REQUIRE(all_coms[0].get_args()[0] == circ.all_bits()[1]); - REQUIRE(all_coms[0].get_args()[1] == circ.all_bits()[0]); - REQUIRE(all_coms[1].get_args()[0] == circ.all_bits()[0]); - REQUIRE(all_coms[1].get_args()[1] 
== circ.all_bits()[1]); - } - GIVEN( - "A large circuit, with a mixture of conditional CX and CZ with " - "multiple classical wires, non conditional CX and CZ, and single " - "qubit gates.") { - SquareGrid arc(5, 10); - Circuit circ(50, 10); - for (unsigned i = 0; i < 48; i++) { - circ.add_op(OpType::CX, {i, i + 1}); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); - circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); - circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); - circ.add_conditional_gate( - OpType::CZ, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); - circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); - } - Routing router(circ, arc); - std::pair output = router.solve(); - Transforms::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - Transforms::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - } - GIVEN( - "A large circuit, with a mixture of conditional CX and CX gates with " - "multiple classical wires, non conditional CX and, single qubit " - "gates, and a directed architecture.") { - SquareGrid arc(10, 4, 2); - Circuit circ(60, 10); - for (unsigned i = 0; i < 58; i++) { - circ.add_op(OpType::CX, {i, i + 1}); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); - circ.add_conditional_gate(OpType::H, {}, {i}, {0, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); - circ.add_conditional_gate(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); - circ.add_conditional_gate( - OpType::CX, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); - circ.add_conditional_gate(OpType::X, {}, {i + 2}, {0, 3}, 0); - } - Routing router(circ, arc); - std::pair output = router.solve(); - Transforms::decompose_SWAP_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - Transforms::decompose_BRIDGE_to_CX().apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, false, true)); - Transforms::decompose_CX_directed(arc).apply(output.first); - REQUIRE(respects_connectivity_constraints(output.first, arc, true, true)); - } -} -SCENARIO( - "Does copying decompose_SWAP_to_CX pass and applying it to a routed " - "Circuit work correctly?") { - GIVEN("A simple circuit and architecture.") { - Circuit circ(5); - add_2qb_gates( - circ, OpType::CX, - {{0, 3}, - {1, 4}, - {0, 1}, - {2, 0}, - {2, 1}, - {1, 0}, - {0, 4}, - {2, 1}, - {0, 3}}); - Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); - Routing router(circ, arc); - Circuit c = router.solve().first; - Transform T_1 = Transforms::decompose_SWAP_to_CX(); - T_1.apply(c); - REQUIRE(c.count_gates(OpType::SWAP) == 0); - } -} - -SCENARIO("Does add_distributed_cx account for incorrect BRIDGE nodes?") { - GIVEN("An incorrect and a correct BRIDGE orientation.") { - Architecture a({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}}); - Circuit c(6); - c.add_op(OpType::CX, {3, 5}); - c.add_op(OpType::CX, {2, 0}); - - Placement placer(a); - qubit_vector_t c_qubits = c.all_qubits(); - node_vector_t a_nodes = a.get_all_nodes_vec(); - - qubit_mapping_t initial_map = { - {c_qubits[0], a_nodes[0]}, {c_qubits[1], a_nodes[1]}, - {c_qubits[2], a_nodes[2]}, {c_qubits[3], a_nodes[3]}, - {c_qubits[4], a_nodes[4]}, {c_qubits[5], a_nodes[5]}}; - - placer.place_with_map(c, initial_map); 
- - Routing r(c, a); - RoutingTester rt(&r); - - rt.initialise_slicefrontier(); - qubit_bimap_t qbm; - for (unsigned nn = 0; nn <= 5; ++nn) { - qbm.insert({a_nodes[nn], Node(nn)}); - } - - rt.set_qmap(qbm); - - rt.add_distributed_cx(Node(5), Node(3), Node(4)); - rt.add_distributed_cx(Node(2), Node(0), Node(1)); - - std::vector bridge_commands = rt.get_circ()->get_commands(); - qubit_vector_t com_0_qubits = {a_nodes[2], a_nodes[1], a_nodes[0]}; - qubit_vector_t com_1_qubits = {a_nodes[3], a_nodes[4], a_nodes[5]}; - REQUIRE(bridge_commands[0].get_qubits() == com_0_qubits); - REQUIRE(bridge_commands[1].get_qubits() == com_1_qubits); - } - GIVEN("An invalid BRIDGE.") { - Architecture a({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}}); - Circuit c(6); - c.add_op(OpType::CX, {2, 5}); - c.add_op(OpType::CX, {0, 1}); - - Placement placer(a); - qubit_vector_t c_qubits = c.all_qubits(); - node_vector_t a_nodes = a.get_all_nodes_vec(); - - qubit_mapping_t initial_map = { - {c_qubits[0], a_nodes[0]}, {c_qubits[1], a_nodes[1]}, - {c_qubits[2], a_nodes[2]}, {c_qubits[3], a_nodes[3]}, - {c_qubits[4], a_nodes[4]}, {c_qubits[5], a_nodes[5]}}; - - placer.place_with_map(c, initial_map); - - Routing r(c, a); - RoutingTester rt(&r); - - rt.initialise_slicefrontier(); - qubit_bimap_t qbm; - for (unsigned nn = 0; nn <= 5; ++nn) { - qbm.insert({a_nodes[nn], Node(nn)}); - } - - rt.set_qmap(qbm); - - REQUIRE_THROWS_AS( - rt.add_distributed_cx(Node(2), Node(4), Node(5)), BridgeInvalid); - REQUIRE_THROWS_AS( - rt.add_distributed_cx(Node(0), Node(1), Node(3)), BridgeInvalid); - REQUIRE_THROWS_AS( - rt.add_distributed_cx(Node(0), Node(1), Node(2)), BridgeInvalid); - } -} - -} // namespace test_Routing -} // namespace tket diff --git a/tket/tests/test_RoutingMethod.cpp b/tket/tests/test_RoutingMethod.cpp new file mode 100644 index 0000000000..01ca79e5f8 --- /dev/null +++ b/tket/tests/test_RoutingMethod.cpp @@ -0,0 +1,247 @@ +#include +#include +#include +#include + +#include "Mapping/MappingFrontier.hpp" +#include "Mapping/RoutingMethodCircuit.hpp" +#include "Placement/Placement.hpp" + +namespace tket { + +SCENARIO("Test RoutingMethod default methods.") { + RoutingMethod rm; + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared(arc); + Circuit circ(3); + MappingFrontier_ptr mf = std::make_shared(circ); + unit_map_t empty; + std::pair rm_return = rm.routing_method(mf, shared_arc); + REQUIRE(!rm_return.first); + REQUIRE(rm_return.second == empty); +} + +std::tuple +test_routing_method_mf_simple_relabel( + const Circuit& c, const ArchitecturePtr& a) { + Circuit copy(c); + std::vector qs = copy.all_qubits(); + std::vector ns = a->get_all_nodes_vec(); + // enforce in tests that ns >= qs, this is testing purposes only so fine... + unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + return {true, copy, rename_map, final_map}; +} + +std::tuple +test_routing_method_mf_swap_perm(const Circuit& c, const ArchitecturePtr& a) { + if (c.n_qubits() > 2 && a->n_nodes() > 2) { + Circuit copy(c); + std::vector qs = copy.all_qubits(); + std::vector ns = a->get_all_nodes_vec(); + // enforce in tests that ns >= qs, this is testing purposes only so fine... 
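+ // rename_map records how the incoming circuit's qubits are relabelled onto
+ // architecture nodes before routing; final_map reports the node permutation
+ // implied by the returned circuit. In this variant final_map is left as the
+ // identity, so the added swap is treated as an implicit permutation; the
+ // *_no_perm variant below records the swap in final_map explicitly instead.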
+ unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + MappingFrontier mf(copy); + // n.b. add_swap permutes out edge of both boundaries + mf.add_swap(Node("t", 0), Node("t", 1)); + + return {true, copy, rename_map, final_map}; + } else { + return {false, Circuit(), {}, {}}; + } +} + +std::tuple<bool, Circuit, unit_map_t, unit_map_t> +test_routing_method_mf_swap_no_perm( + const Circuit& c, const ArchitecturePtr& a) { + if (c.n_qubits() > 2 && a->n_nodes() > 2) { + Circuit copy(c); + std::vector<Qubit> qs = copy.all_qubits(); + std::vector<Node> ns = a->get_all_nodes_vec(); + // enforce in tests that ns.size() >= qs.size(); this is for testing purposes only, so fine + unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + MappingFrontier mf(copy); + // n.b. add_swap permutes out edge of both boundaries + mf.add_swap(Node("t", 0), Node("t", 1)); + final_map[Node("t", 0)] = Node("t", 1); + final_map[Node("t", 1)] = Node("t", 0); + + return {true, copy, rename_map, final_map}; + } else { + return {false, Circuit(), {}, {}}; + } +} + +std::tuple<bool, Circuit, unit_map_t, unit_map_t> +test_routing_method_circuit_no_perm( + const Circuit& c, const ArchitecturePtr& a) { + if (c.n_qubits() > 2 && a->n_nodes() > 2) { + Circuit copy(c.n_qubits()); + copy.add_op(OpType::SWAP, {0, 1}); + copy.add_op(OpType::CX, {1, 0}); + copy.add_op(OpType::CX, {1, 0}); + + std::vector<Qubit> qs = copy.all_qubits(); + std::vector<Node> ns = a->get_all_nodes_vec(); + // enforce in tests that ns.size() >= qs.size(); this is for testing purposes only, so fine + unit_map_t rename_map, final_map; + for (unsigned i = 0; i < qs.size(); i++) { + rename_map.insert({qs[i], ns[i]}); + final_map.insert({ns[i], ns[i]}); + } + copy.rename_units(rename_map); + MappingFrontier mf(copy); + final_map[Node("t", 0)] = Node("t", 1); + final_map[Node("t", 1)] = Node("t", 0); + return {true, copy, rename_map, final_map}; + } else { + return {false, Circuit(), {}, {}}; + } +} + +SCENARIO("Test RoutingMethodCircuit checking criteria") { + RoutingMethodCircuit rmc(test_routing_method_mf_swap_no_perm, 5, 5); + Circuit c(2), circ3(3); + c.add_op(OpType::CX, {0, 1}); + circ3.add_op(OpType::CX, {0, 2}); + circ3.add_op(OpType::CX, {2, 1}); + MappingFrontier_ptr mf2 = std::make_shared<MappingFrontier>(c); + MappingFrontier_ptr mf3 = std::make_shared<MappingFrontier>(circ3); + + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared<Architecture>(arc); + + std::pair<bool, unit_map_t> res0 = rmc.routing_method(mf2, shared_arc); + REQUIRE(!res0.first); + std::pair<bool, unit_map_t> res1 = rmc.routing_method(mf3, shared_arc); + REQUIRE(res1.first); +} +SCENARIO("Test RoutingMethodCircuit::routing_method") { + Circuit comp(3); + comp.add_op(OpType::SWAP, {0, 1}); + comp.add_op(OpType::CX, {1, 0}); + comp.add_op(OpType::CX, {1, 0}); + comp.add_op(OpType::CX, {1, 0}); + comp.add_op(OpType::CX, {1, 0}); + auto qbs = comp.all_qubits(); + unit_map_t rename_map = { + {qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}, {qbs[2], Node("t", 2)}}; + comp.rename_units(rename_map); + qubit_map_t permutation = { + {Node("t", 0), Node("t", 1)}, {Node("t", 1), Node("t", 0)}}; + comp.permute_boundary_output(permutation); + + GIVEN("Non-implicit Permutation method, using MappingFrontier::add_swap") { + RoutingMethodCircuit rmc(test_routing_method_mf_swap_no_perm, 2, 2); + Circuit c(3); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 
1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + + MappingFrontier_ptr mf = std::make_shared<MappingFrontier>(c); + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared<Architecture>(arc); + std::pair<bool, unit_map_t> output = rmc.routing_method(mf, shared_arc); + unit_map_t empty; + REQUIRE(output.first); + REQUIRE(output.second == empty); + REQUIRE(c == comp); + } + GIVEN("Non-implicit Permutation method, using circuit replacement") { + RoutingMethodCircuit rmc(test_routing_method_circuit_no_perm, 2, 2); + Circuit c(3); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + + MappingFrontier_ptr mf = std::make_shared<MappingFrontier>(c); + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared<Architecture>(arc); + std::pair<bool, unit_map_t> output = rmc.routing_method(mf, shared_arc); + unit_map_t empty; + REQUIRE(output.first); + REQUIRE(output.second == empty); + REQUIRE(c == comp); + } + GIVEN("Implicit Permutation method, using MappingFrontier::add_swap") { + RoutingMethodCircuit rmc(test_routing_method_mf_swap_perm, 2, 2); + Circuit c(3); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {0, 1}); + + MappingFrontier_ptr mf = std::make_shared<MappingFrontier>(c); + Architecture arc( + {{Node("t", 1), Node("t", 0)}, {Node("t", 2), Node("t", 1)}}); + ArchitecturePtr shared_arc = std::make_shared<Architecture>(arc); + std::pair<bool, unit_map_t> output = rmc.routing_method(mf, shared_arc); + unit_map_t empty; + REQUIRE(output.first); + REQUIRE(output.second == empty); + + Circuit comp1(3); + comp1.add_op(OpType::SWAP, {0, 1}); + comp1.add_op(OpType::CX, {1, 0}); + comp1.add_op(OpType::CX, {1, 0}); + comp1.add_op(OpType::CX, {0, 1}); + comp1.add_op(OpType::CX, {0, 1}); + qbs = comp1.all_qubits(); + rename_map = { + {qbs[0], Node("t", 0)}, {qbs[1], Node("t", 1)}, {qbs[2], Node("t", 2)}}; + comp1.rename_units(rename_map); + + REQUIRE(c == comp1); + } +} + +SCENARIO("Test RoutingMethodCircuit produces correct map") { + RoutingMethodCircuit rmc(test_routing_method_mf_simple_relabel, 5, 5); + Architecture arc({{Node(0), Node(1)}, {Node(1), Node(2)}}); + ArchitecturePtr shared_arc = std::make_shared<Architecture>(arc); + Circuit c(3); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {1, 2}); + std::shared_ptr<unit_bimaps_t> maps = std::make_shared<unit_bimaps_t>(); + // Initialise the maps in the same way it's done with CompilationUnit + for (const UnitID& u : c.all_units()) { + maps->initial.insert({u, u}); + maps->final.insert({u, u}); + } + Placement pl(arc); + qubit_mapping_t partial_map; + partial_map.insert({Qubit(0), Node(0)}); + partial_map.insert({Qubit(1), Node(1)}); + // We leave q[2] unplaced + pl.place_with_map(c, partial_map, maps); + MappingFrontier_ptr mf = std::make_shared<MappingFrontier>(c, maps); + + std::pair<bool, unit_map_t> res = rmc.routing_method(mf, shared_arc); + + for (const Qubit& q : c.all_qubits()) { + REQUIRE(maps->initial.right.find(q) != maps->initial.right.end()); + REQUIRE(maps->final.right.find(q) != maps->final.right.end()); + } +} + +} // namespace tket \ No newline at end of file diff --git a/tket/tests/test_RoutingPasses.cpp b/tket/tests/test_RoutingPasses.cpp new file mode 100644 index 0000000000..3b2643e604 --- /dev/null +++ b/tket/tests/test_RoutingPasses.cpp @@ -0,0 +1,467 @@ +// Copyright 2019-2022 Cambridge Quantum Computing +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "Characterisation/DeviceCharacterisation.hpp" +#include "Circuit/Circuit.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/MappingManager.hpp" +#include "Mapping/Verification.hpp" +#include "OpType/OpType.hpp" +#include "Predicates/CompilerPass.hpp" +#include "Predicates/PassGenerators.hpp" +#include "Predicates/Predicates.hpp" +#include "Simulation/CircuitSimulator.hpp" +#include "Simulation/ComparisonFunctions.hpp" +#include "Transformations/BasicOptimisation.hpp" +#include "Transformations/Decomposition.hpp" +#include "Transformations/OptimisationPass.hpp" +#include "Transformations/Rebase.hpp" +#include "Transformations/Transform.hpp" +#include "Utils/HelperFunctions.hpp" +#include "testutil.hpp" + +namespace tket { + +using Connection = Architecture::Connection; + +SCENARIO("Test decompose_SWAP_to_CX pass", ) { + Architecture arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 0}}); + GIVEN("A single SWAP gate. Finding if correct number of vertices added") { + Circuit circ(5); + circ.add_op(OpType::SWAP, {0, 1}); + int original_vertices = circ.n_vertices(); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX().apply(circ); + int decompose_vertices = circ.n_vertices(); + REQUIRE(decompose_vertices - original_vertices == 2); + REQUIRE(respects_connectivity_constraints(circ, arc, false)); + } + GIVEN("A single SWAP gate, finding if correct path is preserved.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {0, 1}); + // check output boundary + Vertex boundary_0 = circ.get_out(Qubit(0)); + Vertex boundary_1 = circ.get_out(Qubit(1)); + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(circ.get_out(Qubit(0)) == boundary_0); + REQUIRE(circ.get_out(Qubit(1)) == boundary_1); + // check output boundary is the same + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {0, 1}); + circ.add_op(OpType::CX, {0, 1}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite case.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {0, 1}); + circ.add_op(OpType::CX, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite SWAP.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + circ.add_op(OpType::CX, {0, 1}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, 
opposite case, opposite SWAP.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + circ.add_op(OpType::CX, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[2].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite SWAP, pre CX.") { + Circuit circ(2); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::SWAP, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[1].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite case, opposite SWAP, pre CX.") { + Circuit circ(2); + circ.add_op(OpType::CX, {1, 0}); + circ.add_op(OpType::SWAP, {1, 0}); + Transforms::decompose_SWAP_to_CX().apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[1].get_args() == cor); + } + GIVEN( + "A circuit that facilitates some CX annihilation for an undirected " + "architecture, opposite case, opposite SWAP, pre CX, directed bool " + "on.") { + Circuit circ(2); + circ.add_op(OpType::CX, {1, 0}); + circ.add_op(OpType::SWAP, {1, 0}); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[1].get_args() == cor); + } + GIVEN("A circuit with no CX gates, but with a directed architecture.") { + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[0], all[1]}; + REQUIRE(circ.get_commands()[0].get_args() == cor); + } + GIVEN( + "A circuit with no CX gates, but with a directed architecture, " + "opposite case.") { + Architecture dummy_arc({{1, 0}}); + Circuit circ(2); + circ.add_op(OpType::SWAP, {1, 0}); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(dummy_arc).apply(circ); + qubit_vector_t all = circ.all_qubits(); + unit_vector_t cor = {all[1], all[0]}; + REQUIRE(circ.get_commands()[0].get_args() == cor); + } + // TEST CIRCUIT + Circuit circ(10); + int count = 0; + for (unsigned x = 0; x < 10; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + count += 2; + if (x % 2) { + add_2qb_gates(circ, OpType::SWAP, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::SWAP, {{y, x}, {y, y + 1}}); + } + } + } + + GIVEN("A network of SWAP gates.") { + int original_vertices = circ.n_vertices(); + std::vector<Vertex> original_boundary; + for (unsigned i = 0; i < circ.n_qubits(); i++) { + original_boundary.push_back(circ.get_out(Qubit(i))); + } + Transforms::decompose_SWAP_to_CX().apply(circ); + int decompose_vertices = circ.n_vertices(); + for (unsigned i = 0; i < circ.n_qubits(); i++) { + REQUIRE(original_boundary[i] == circ.get_out(Qubit(i))); + } + REQUIRE(decompose_vertices - original_vertices == 2 * count); + } + GIVEN("A routed network of SWAP gates.") { + SquareGrid grid(2, 5); + MappingManager mm(std::make_shared<SquareGrid>(grid)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared<LexiLabellingMethod>(), + std::make_shared<LexiRouteRoutingMethod>()})); + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, grid, false, true)); + GIVEN("Directed CX gates") { + 
Transforms::decompose_SWAP_to_CX().apply(circ); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + Transforms::decompose_CX_directed(grid).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, grid, true)); + } + } +} + +SCENARIO("Test redirect_CX_gates pass", "[routing]") { + Architecture arc({{1, 0}, {1, 2}}); + GIVEN("A circuit that requires no redirection.") { + Circuit circ(3); + add_2qb_gates(circ, OpType::CX, {{1, 0}, {1, 2}}); + reassign_boundary(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A circuit that requires redirection.") { + Circuit circ(3); + add_2qb_gates(circ, OpType::CX, {{0, 1}, {2, 1}}); + reassign_boundary(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A circuit that requires no redirection, with SWAP.") { + Circuit circ(3); + + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {0, 1}); + + swap_v = circ.add_op(OpType::SWAP, {0, 2}); + swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {2, 1}); + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A circuit that requires redirection, with SWAP.") { + Circuit circ(3); + + Vertex swap_v = circ.add_op(OpType::SWAP, {1, 0}); + EdgeVec swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {1, 0}); + + swap_v = circ.add_op(OpType::SWAP, {0, 2}); + swap_outs = circ.get_all_out_edges(swap_v); + circ.dag[swap_outs[0]].ports.first = 1; + circ.dag[swap_outs[1]].ports.first = 0; + + circ.add_op(OpType::CX, {1, 2}); + + reassign_boundary(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true)); + } + GIVEN("A complicated circuit of CX gates, routed.") { + Circuit circ(12); + SquareGrid grid(3, 4); + + for (unsigned x = 0; x < 12; ++x) { + for (unsigned y = 0; y + 1 < x; ++y) { + if (x % 2) { + add_2qb_gates(circ, OpType::CX, {{x, y}, {y + 1, y}}); + } else { + add_2qb_gates(circ, OpType::CX, {{y, x}, {y, y + 1}}); + } + } + } + MappingManager mm(std::make_shared<SquareGrid>(grid)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared<LexiLabellingMethod>(), + std::make_shared<LexiRouteRoutingMethod>()})); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + Transforms::decompose_SWAP_to_CX(arc).apply(circ); + Transforms::decompose_CX_directed(grid).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, grid, true)); + } +} + +SCENARIO("Routing preserves the number of qubits in a given instance of CnX op") { + std::vector<std::pair<Node, Node>> cons; + cons.push_back({Node("x", 1), Node("x", 0)}); + cons.push_back({Node("x", 2), Node("x", 1)}); + Architecture arc( + std::vector<Architecture::Connection>(cons.begin(), cons.end())); + PassPtr pass = gen_default_mapping_pass(arc, false); + Circuit c(3); + c.add_op(OpType::CnX, {2, 1}); + CompilationUnit cu(c); + bool applied = pass->apply(cu); + const Circuit &c1 = cu.get_circ_ref(); + REQUIRE(c.n_qubits() == c1.n_qubits()); +} + +SCENARIO("Default 
mapping pass delays measurements") { + std::vector<std::pair<Node, Node>> cons; + cons.push_back({Node("x", 0), Node("x", 2)}); + cons.push_back({Node("x", 1), Node("x", 2)}); + cons.push_back({Node("x", 2), Node("x", 3)}); + cons.push_back({Node("x", 3), Node("x", 0)}); + Architecture arc( + std::vector<Architecture::Connection>(cons.begin(), cons.end())); + PassPtr pass = gen_default_mapping_pass(arc, false); + Circuit c(4, 4); + c.add_op(OpType::CX, {0, 1}); + c.add_op(OpType::CX, {1, 2}); + c.add_op(OpType::CX, {2, 3}); + c.add_op(OpType::CX, {3, 0}); + for (unsigned nn = 0; nn <= 3; ++nn) { + c.add_measure(nn, nn); + } + Circuit c2(c); + CompilationUnit cu(c); + REQUIRE(pass->apply(cu)); + CompilationUnit cu2(c2); + // delay_measures defaults to true + PassPtr pass2 = gen_default_mapping_pass(arc); + REQUIRE(pass2->apply(cu2)); + PredicatePtr mid_meas_pred = std::make_shared<NoMidMeasurePredicate>(); + REQUIRE(!mid_meas_pred->verify(cu.get_circ_ref())); + REQUIRE(mid_meas_pred->verify(cu2.get_circ_ref())); +} + +SCENARIO( + "Methods related to correct routing and decomposition of circuits with " + "classical wires.") { + GIVEN("A circuit with classical wires on CX gates.") { + Architecture test_arc({{0, 1}, {1, 2}}); + Circuit circ(3, 2); + circ.add_op(OpType::CX, {0, 1}); + circ.add_op(OpType::H, {0}); + circ.add_conditional_gate<unsigned>(OpType::CX, {}, {0, 1}, {0, 1}, 0); + circ.add_conditional_gate<unsigned>(OpType::CX, {}, {2, 1}, {0, 1}, 1); + circ.add_conditional_gate<unsigned>(OpType::CX, {}, {0, 1}, {0, 1}, 2); + circ.add_conditional_gate<unsigned>(OpType::CX, {}, {2, 1}, {1, 0}, 3); + circ.add_conditional_gate<unsigned>(OpType::CX, {}, {0, 2}, {0, 1}, 0); + MappingManager mm(std::make_shared<Architecture>(test_arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared<LexiLabellingMethod>(), + std::make_shared<LexiRouteRoutingMethod>()})); + + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, test_arc, false, false)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, test_arc, false, false)); + } + GIVEN( + "A circuit that requires modification to satisfy architecture " + "constraints.") { + Architecture arc({{0, 1}, {1, 2}, {2, 3}, {3, 4}}); + Circuit circ(5, 1); + circ.add_conditional_gate<unsigned>(OpType::CX, {}, {0, 1}, {0}, 1); + add_2qb_gates(circ, OpType::CX, {{0, 1}, {1, 2}, {1, 3}, {1, 4}, {0, 1}}); + + MappingManager mm(std::make_shared<Architecture>(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared<LexiLabellingMethod>(), + std::make_shared<LexiRouteRoutingMethod>()})); + + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, false)); + Command classical_com = circ.get_commands()[0]; + REQUIRE(classical_com.get_args()[0] == circ.all_bits()[0]); + } + GIVEN("A single Bridge gate with multiple classical wires, decomposed.") { + Architecture arc({{0, 1}, {1, 2}}); + Circuit circ(3, 3); + circ.add_conditional_gate<unsigned>( + OpType::BRIDGE, {}, {0, 1, 2}, {0, 1, 2}, 1); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + for (Command com : circ.get_commands()) { + REQUIRE(com.get_args()[0] == circ.all_bits()[0]); + REQUIRE(com.get_args()[1] == circ.all_bits()[1]); + REQUIRE(com.get_args()[2] == circ.all_bits()[2]); + } + } + GIVEN("A directed architecture, a single CX gate that requires flipping.") { + Architecture arc(std::vector<std::pair<unsigned, unsigned>>{{0, 1}}); + Circuit circ(2, 2); 
+ circ.add_conditional_gate<unsigned>(OpType::CX, {}, {0, 1}, {1, 0}, 0); + circ.add_conditional_gate<unsigned>(OpType::CX, {}, {1, 0}, {0, 1}, 1); + reassign_boundary(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, false)); + REQUIRE(!respects_connectivity_constraints(circ, arc, true, false)); + Transforms::decompose_CX_directed(arc).apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, true, false)); + std::vector<Command> all_coms = circ.get_commands(); + REQUIRE(all_coms[0].get_args()[0] == circ.all_bits()[1]); + REQUIRE(all_coms[0].get_args()[1] == circ.all_bits()[0]); + REQUIRE(all_coms[1].get_args()[0] == circ.all_bits()[0]); + REQUIRE(all_coms[1].get_args()[1] == circ.all_bits()[1]); + } + GIVEN( + "A large circuit, with a mixture of conditional CX and CZ with " + "multiple classical wires, non conditional CX and CZ, and single " + "qubit gates.") { + SquareGrid arc(5, 10); + Circuit circ(50, 10); + for (unsigned i = 0; i < 48; i++) { + circ.add_op(OpType::CX, {i, i + 1}); + circ.add_conditional_gate<unsigned>( + OpType::CX, {}, {i + 2, i}, {0, 2, 3, 5}, 1); + circ.add_conditional_gate<unsigned>(OpType::H, {}, {i}, {0, 7}, 1); + circ.add_conditional_gate<unsigned>( + OpType::CX, {}, {i + 2, i + 1}, {1, 2, 3, 5, 9}, 0); + circ.add_conditional_gate<unsigned>(OpType::S, {}, {i + 1}, {1, 2, 7}, 1); + circ.add_conditional_gate<unsigned>( + OpType::CZ, {}, {i, i + 1}, {4, 6, 8, 7, 9}, 0); + circ.add_conditional_gate<unsigned>(OpType::X, {}, {i + 2}, {0, 3}, 0); + } + MappingManager mm(std::make_shared<SquareGrid>(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared<LexiLabellingMethod>(), + std::make_shared<LexiRouteRoutingMethod>()})); + + Transforms::decompose_SWAP_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + Transforms::decompose_BRIDGE_to_CX().apply(circ); + REQUIRE(respects_connectivity_constraints(circ, arc, false, true)); + } +} + +SCENARIO( + "Does copying decompose_SWAP_to_CX pass and applying it to a routed " + "Circuit work correctly?") { + GIVEN("A simple circuit and architecture.") { + Circuit circ(5); + add_2qb_gates( circ, OpType::CX, {{0, 3}, {1, 4}, {0, 1}, {2, 0}, {2, 1}, {1, 0}, {0, 4}, {2, 1}, {0, 3}}); + Architecture arc({{1, 0}, {0, 2}, {1, 2}, {2, 3}, {2, 4}, {4, 3}}); + MappingManager mm(std::make_shared<Architecture>(arc)); + REQUIRE(mm.route_circuit( + circ, {std::make_shared<LexiLabellingMethod>(), + std::make_shared<LexiRouteRoutingMethod>()})); + Transform T_1 = Transforms::decompose_SWAP_to_CX(); + T_1.apply(circ); + REQUIRE(circ.count_gates(OpType::SWAP) == 0); + } +} +} // namespace tket diff --git a/tket/tests/test_Synthesis.cpp b/tket/tests/test_Synthesis.cpp index 5bc342d74c..a8ec7cb7ac 100644 --- a/tket/tests/test_Synthesis.cpp +++ b/tket/tests/test_Synthesis.cpp @@ -16,6 +16,7 @@ #include #include +#include "Circuit/CircPool.hpp" #include "Circuit/CircUtils.hpp" #include "CircuitsForTesting.hpp" #include "Gate/Rotation.hpp" @@ -705,16 +706,16 @@ SCENARIO("Testing general 1qb squash") { Circuit copy = circ; OpTypeSet singleqs = {OpType::Rz, OpType::PhasedX}; bool success = - Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE_FALSE(success); singleqs.insert(OpType::Rx); - success = Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + success = Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE(success); check_command_types(circ, {OpType::Rz, OpType::PhasedX}); REQUIRE(test_unitary_comparison(circ, copy)); - success = Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + 
success = Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE_FALSE(success); } @@ -725,12 +726,12 @@ SCENARIO("Testing general 1qb squash") { Circuit copy = circ; OpTypeSet singleqs = {OpType::Rz, OpType::PhasedX}; bool success = - Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE(success); check_command_types(circ, {OpType::PhasedX}); REQUIRE(test_unitary_comparison(circ, copy)); - success = Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + success = Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE_FALSE(success); } @@ -742,12 +743,12 @@ SCENARIO("Testing general 1qb squash") { Circuit copy = circ; OpTypeSet singleqs = {OpType::Rz, OpType::PhasedX}; bool success = - Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE(success); check_command_types(circ, {OpType::Rz}); REQUIRE(test_unitary_comparison(circ, copy)); - success = Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + success = Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE_FALSE(success); } @@ -759,13 +760,15 @@ SCENARIO("Testing general 1qb squash") { bool success = Transforms::rebase_HQS().apply(circ); REQUIRE(success); OpTypeSet singleqs = {OpType::Rz, OpType::PhasedX}; - success = Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + success = Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE(success); + success = Transforms::remove_redundancies().apply(circ); + REQUIRE(success); check_command_types( circ, {OpType::Rz, OpType::PhasedX, OpType::ZZMax, OpType::Rz, OpType::PhasedX}); - success = Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + success = Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE_FALSE(success); } @@ -788,7 +791,7 @@ SCENARIO("Testing general 1qb squash") { circ.add_op(OpType::Rz, 1., {0}); OpTypeSet singleqs = {OpType::Rz, OpType::Rx, OpType::PhasedX}; bool success = - Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE(success); check_command_types( @@ -796,7 +799,7 @@ SCENARIO("Testing general 1qb squash") { {OpType::Rz, OpType::PhasedX, OpType::Conditional, OpType::Conditional, OpType::Conditional, OpType::Conditional, OpType::Conditional, OpType::Conditional, OpType::Rz, OpType::PhasedX}); - success = Transforms::squash_factory(singleqs, Transforms::tk1_to_PhasedXRz) + success = Transforms::squash_factory(singleqs, CircPool::tk1_to_PhasedXRz) .apply(circ); REQUIRE_FALSE(success); } @@ -1400,14 +1403,15 @@ SCENARIO("Test synthesise_UMD") { Expr a = 0.; Expr b = 0.; Expr c = 0.; - Circuit circ = Transforms::tk1_to_PhasedXRz(a, b, c); + Circuit circ = CircPool::tk1_to_PhasedXRz(a, b, c); + Transforms::remove_redundancies().apply(circ); REQUIRE(circ.n_gates() == 0); } GIVEN("An Rz in disguise") { Expr a = 0.3; Expr b = 0.; Expr c = 1.3; - Circuit circ = Transforms::tk1_to_PhasedXRz(a, b, c); + Circuit circ = CircPool::tk1_to_PhasedXRz(a, b, c); REQUIRE(circ.n_gates() == 1); } GIVEN("Y-gate") { diff --git a/tket/tests/test_json.cpp b/tket/tests/test_json.cpp index 70aa8cc755..beab6979e5 100644 --- a/tket/tests/test_json.cpp +++ 
b/tket/tests/test_json.cpp @@ -24,6 +24,9 @@ #include "CircuitsForTesting.hpp" #include "Converters/PhasePoly.hpp" #include "Gate/SymTable.hpp" +#include "Mapping/LexiLabelling.hpp" +#include "Mapping/LexiRoute.hpp" +#include "Mapping/RoutingMethod.hpp" #include "OpType/OpType.hpp" #include "Ops/OpPtr.hpp" #include "Predicates/PassGenerators.hpp" @@ -33,6 +36,7 @@ #include "Transformations/Transform.hpp" #include "Utils/Json.hpp" #include "testutil.hpp" + namespace tket { namespace test_json { @@ -346,14 +350,6 @@ SCENARIO("Test Circuit serialization") { } SCENARIO("Test config serializations") { - GIVEN("RoutingConfig") { - RoutingConfig orig(20, 6, 3, 2.5); - nlohmann::json j_config = orig; - RoutingConfig loaded = j_config.get<RoutingConfig>(); - REQUIRE(orig == loaded); - nlohmann::json j_loaded = loaded; - REQUIRE(j_config == j_loaded); - } GIVEN("PlacementConfig") { PlacementConfig orig(5, 20, 100000, 10, 1); nlohmann::json j_config = orig; @@ -428,6 +424,35 @@ SCENARIO("Test device serializations") { } } +SCENARIO("Test RoutingMethod serializations") { + RoutingMethod rm; + nlohmann::json rm_j = rm; + RoutingMethod loaded_rm_j = rm_j.get<RoutingMethod>(); + + Circuit c(2, 2); + c.add_op(OpType::CX, {0, 1}); + + MappingFrontier mf(c); + MappingFrontier_ptr mf_sp = std::make_shared<MappingFrontier>(mf); + CHECK(!loaded_rm_j.routing_method(mf_sp, std::make_shared<SquareGrid>(2, 2)) + .first); + + std::vector<RoutingMethodPtr> rmp = { + std::make_shared<RoutingMethod>(rm), + std::make_shared<LexiLabellingMethod>(), + std::make_shared<LexiRouteRoutingMethod>(5)}; + + nlohmann::json rmp_j = rmp; + std::vector<RoutingMethodPtr> loaded_rmp_j = + rmp_j.get<std::vector<RoutingMethodPtr>>(); + CHECK(!loaded_rmp_j[0] + ->routing_method(mf_sp, std::make_shared<SquareGrid>(2, 2)) + .first); + CHECK(loaded_rmp_j[1] + ->routing_method(mf_sp, std::make_shared<SquareGrid>(2, 2)) + .first); +} + SCENARIO("Test predicate serializations") { #define BASICPREDJSONTEST(classname) \ GIVEN(#classname) { \ @@ -516,7 +541,8 @@ SCENARIO("Test predicate serializations") { SCENARIO("Test compiler pass serializations") { Architecture arc = SquareGrid(2, 4, 2); - RoutingConfig rcon(20, 6, 3, 2.5); + RoutingMethodPtr rmp = std::make_shared<LexiRouteRoutingMethod>(80); + std::vector<RoutingMethodPtr> rcon = {rmp}; PlacementConfig plcon(5, 20, 100000, 10, 1000); PlacementPtr place = std::make_shared(arc, plcon); std::map<Qubit, Node> qmap = {{Qubit(0), Node(1)}, {Qubit(3), Node(2)}}; @@ -596,6 +622,7 @@ SCENARIO("Test compiler pass serializations") { COMPPASSJSONTEST(PlacementPass, gen_placement_pass(place)) // TKET-1419 COMPPASSJSONTEST(NoiseAwarePlacement, gen_placement_pass(na_place)) + COMPPASSJSONTEST(NaivePlacementPass, gen_naive_placement_pass(arc)) #undef COMPPASSJSONTEST GIVEN("RoutingPass") { // Can only be applied to placed circuits @@ -613,6 +640,25 @@ SCENARIO("Test compiler pass serializations") { nlohmann::json j_loaded = loaded; REQUIRE(j_pp == j_loaded); } + GIVEN("Routing with multiple routing methods") { + RoutingMethodPtr mrmp = + std::make_shared(60, 80); + RoutingMethodPtr brmp = std::make_shared(); + std::vector<RoutingMethodPtr> mrcon = {mrmp, rmp, brmp}; + Circuit circ = CircuitsForTesting::get().uccsd; + CompilationUnit cu{circ}; + PassPtr placement = gen_placement_pass(place); + placement->apply(cu); + CompilationUnit copy = cu; + PassPtr pp = gen_routing_pass(arc, mrcon); + nlohmann::json j_pp = pp; + PassPtr loaded = j_pp.get<PassPtr>(); + pp->apply(cu); + loaded->apply(copy); + REQUIRE(cu.get_circ_ref() == copy.get_circ_ref()); + nlohmann::json j_loaded = loaded; + REQUIRE(j_pp == j_loaded); + } #define COMPPASSDESERIALIZE(passname, pass) \ GIVEN(#passname) { \ Circuit circ = CircuitsForTesting::get().uccsd; \ @@ -640,7 +686,13 @@ SCENARIO("Test compiler pass 
serializations") { j_pp["StandardPass"]["name"] = "FullMappingPass"; j_pp["StandardPass"]["architecture"] = arc; j_pp["StandardPass"]["placement"] = place; - j_pp["StandardPass"]["routing_config"] = rcon; + + nlohmann::json config_array; + for (const auto& con : rcon) { + config_array.push_back(*con); + } + + j_pp["StandardPass"]["routing_config"] = config_array; PassPtr loaded = j_pp.get<PassPtr>(); pp->apply(cu); loaded->apply(copy); @@ -651,11 +703,12 @@ SCENARIO("Test compiler pass serializations") { Circuit circ = CircuitsForTesting::get().uccsd; CompilationUnit cu{circ}; CompilationUnit copy = cu; - PassPtr pp = gen_default_mapping_pass(arc); + PassPtr pp = gen_default_mapping_pass(arc, true); nlohmann::json j_pp; j_pp["pass_class"] = "StandardPass"; j_pp["StandardPass"]["name"] = "DefaultMappingPass"; j_pp["StandardPass"]["architecture"] = arc; + j_pp["StandardPass"]["delay_measures"] = true; PassPtr loaded = j_pp.get<PassPtr>(); pp->apply(cu); loaded->apply(copy); @@ -672,7 +725,11 @@ SCENARIO("Test compiler pass serializations") { j_pp["StandardPass"]["name"] = "CXMappingPass"; j_pp["StandardPass"]["architecture"] = arc; j_pp["StandardPass"]["placement"] = place; - j_pp["StandardPass"]["routing_config"] = rcon; + nlohmann::json config_array; + for (const auto& con : rcon) { + config_array.push_back(*con); + } + j_pp["StandardPass"]["routing_config"] = config_array; j_pp["StandardPass"]["directed"] = true; j_pp["StandardPass"]["delay_measures"] = false; PassPtr loaded = j_pp.get<PassPtr>(); diff --git a/tket/tests/tkettestsfiles.cmake b/tket/tests/tkettestsfiles.cmake index 63d6799706..ea9a347b30 100644 --- a/tket/tests/tkettestsfiles.cmake +++ b/tket/tests/tkettestsfiles.cmake @@ -20,35 +20,45 @@ set(TEST_SOURCES # We should test simpler modules (e.g. Op, Circuit) before # the more complicated things that rely on them (e.g. Routing, # Transform) to help identify exactly where stuff breaks - ${TKET_TESTS_DIR}/tests_main.cpp - ${TKET_TESTS_DIR}/testutil.cpp - ${TKET_TESTS_DIR}/CircuitsForTesting.cpp - ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp ${TKET_TESTS_DIR}/Utils/test_CosSinDecomposition.cpp - ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp - ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp - ${TKET_TESTS_DIR}/Graphs/GraphTestingRoutines.cpp - ${TKET_TESTS_DIR}/Graphs/RandomGraphGeneration.cpp - ${TKET_TESTS_DIR}/Graphs/RandomPlanarGraphs.cpp - ${TKET_TESTS_DIR}/Graphs/RNG.cpp + ${TKET_TESTS_DIR}/Utils/test_HelperFunctions.cpp + ${TKET_TESTS_DIR}/Utils/test_MatrixAnalysis.cpp + ${TKET_TESTS_DIR}/Utils/test_RNG.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphColouring.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphFindComponents.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphFindMaxClique.cpp - ${TKET_TESTS_DIR}/Graphs/test_RNG.cpp ${TKET_TESTS_DIR}/Graphs/test_GraphUtils.cpp ${TKET_TESTS_DIR}/Graphs/test_DirectedGraph.cpp ${TKET_TESTS_DIR}/Graphs/test_ArticulationPoints.cpp ${TKET_TESTS_DIR}/Graphs/test_TreeSearch.cpp + # NOTE: For testing TokenSwapping, it is easier to make use of + # Architecture to set up test problems, rather than trying + # to separate TokenSwapping-without-Architecture tests. 
+ ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_CanonicalRelabelling.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_ExactMappingLookup.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_FilteredSwapSequences.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceReductions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/test_SwapSequenceTable.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/test_DebugFunctions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TSAUtils/test_SwapFunctions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_ArchitectureMappingEndToEnd.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_BestTsaFixedSwapSequences.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_DistancesFromArchitecture.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_FullTsa.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_RiverFlowPathFinder.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_SwapsFromQubitMapping.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_SwapList.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_SwapListOptimiser.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_VariousPartialTsa.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_VectorListHybrid.cpp + ${TKET_TESTS_DIR}/TokenSwapping/test_VectorListHybridSkeleton.cpp ${TKET_TESTS_DIR}/test_PauliString.cpp ${TKET_TESTS_DIR}/Ops/test_ClassicalOps.cpp ${TKET_TESTS_DIR}/Ops/test_Expression.cpp ${TKET_TESTS_DIR}/Ops/test_Ops.cpp - ${TKET_TESTS_DIR}/Gate/GatesData.cpp ${TKET_TESTS_DIR}/Gate/test_GateUnitaryMatrix.cpp - ${TKET_TESTS_DIR}/Simulation/ComparisonFunctions.cpp ${TKET_TESTS_DIR}/Simulation/test_CircuitSimulator.cpp ${TKET_TESTS_DIR}/Simulation/test_PauliExpBoxUnitaryCalculator.cpp - ${TKET_TESTS_DIR}/test_Utils.cpp ${TKET_TESTS_DIR}/Circuit/test_Boxes.cpp ${TKET_TESTS_DIR}/Circuit/test_Circ.cpp ${TKET_TESTS_DIR}/Circuit/test_Symbolic.cpp @@ -59,8 +69,19 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/test_PhasePolynomials.cpp ${TKET_TESTS_DIR}/test_PauliGraph.cpp ${TKET_TESTS_DIR}/test_Architectures.cpp - ${TKET_TESTS_DIR}/test_Placement.cpp - ${TKET_TESTS_DIR}/test_Routing.cpp + ${TKET_TESTS_DIR}/test_ArchitectureAwareSynthesis.cpp + ${TKET_TESTS_DIR}/Placement/test_Placement.cpp + ${TKET_TESTS_DIR}/Placement/test_NeighbourPlacements.cpp + ${TKET_TESTS_DIR}/test_MappingVerification.cpp + ${TKET_TESTS_DIR}/test_MappingFrontier.cpp + ${TKET_TESTS_DIR}/test_RoutingMethod.cpp + ${TKET_TESTS_DIR}/test_MappingManager.cpp + ${TKET_TESTS_DIR}/test_LexicographicalComparison.cpp + ${TKET_TESTS_DIR}/test_LexiRoute.cpp + ${TKET_TESTS_DIR}/test_AASRoute.cpp + ${TKET_TESTS_DIR}/test_MultiGateReorder.cpp + ${TKET_TESTS_DIR}/test_BoxDecompRoutingMethod.cpp + ${TKET_TESTS_DIR}/test_RoutingPasses.cpp ${TKET_TESTS_DIR}/test_DeviceCharacterisation.cpp ${TKET_TESTS_DIR}/test_Clifford.cpp ${TKET_TESTS_DIR}/test_MeasurementSetup.cpp @@ -84,4 +105,5 @@ set(TEST_SOURCES ${TKET_TESTS_DIR}/ZX/test_ZXDiagram.cpp ${TKET_TESTS_DIR}/ZX/test_ZXAxioms.cpp ${TKET_TESTS_DIR}/ZX/test_ZXSimp.cpp + ${TKET_TESTS_DIR}/ZX/test_Flow.cpp ) diff --git a/tket/tests/tkettestutilsfiles.cmake b/tket/tests/tkettestutilsfiles.cmake new file mode 100644 index 0000000000..bedc380ec1 --- /dev/null +++ b/tket/tests/tkettestutilsfiles.cmake @@ -0,0 +1,43 @@ +# Copyright 2019-2022 Cambridge Quantum Computing +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# file to store all the files that serve as utils for the tket unit tests +# new files should be added here + +set(TESTUTILS_SOURCES + ${TKET_TESTS_DIR}/tests_main.cpp + ${TKET_TESTS_DIR}/testutil.cpp + ${TKET_TESTS_DIR}/CircuitsForTesting.cpp + ${TKET_TESTS_DIR}/Graphs/EdgeSequence.cpp + ${TKET_TESTS_DIR}/Graphs/EdgeSequenceColouringParameters.cpp + ${TKET_TESTS_DIR}/Graphs/GraphTestingRoutines.cpp + ${TKET_TESTS_DIR}/Graphs/RandomGraphGeneration.cpp + ${TKET_TESTS_DIR}/Graphs/RandomPlanarGraphs.cpp + ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedCompleteSolutions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/Data/FixedSwapSequences.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/NeighboursFromEdges.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/PermutationTestUtils.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TableLookup/SwapSequenceReductionTester.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ArchitectureEdgesReimplementation.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/BestTsaTester.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DebugFunctions.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/DecodedProblemData.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/FullTsaTesting.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/GetRandomSet.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/PartialTsaTesting.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/ProblemGeneration.cpp + ${TKET_TESTS_DIR}/TokenSwapping/TestUtils/TestStatsStructs.cpp + ${TKET_TESTS_DIR}/Gate/GatesData.cpp + ${TKET_TESTS_DIR}/Simulation/ComparisonFunctions.cpp +) diff --git a/tket/tket_deps b/tket/tket_deps index fd502e48f5..50bd3419e1 100755 --- a/tket/tket_deps +++ b/tket/tket_deps @@ -24,22 +24,23 @@ comps = [ ] +init_str = "list(APPEND DEPS_${COMP}" + + def generate_graph(): deps = dict() for comp in comps: deps[comp] = set() - srcdir = os.path.join(src, comp) - for root, dirs, files in os.walk(srcdir): - for f in files: - if f.endswith(".cpp") or f.endswith(".hpp"): - fpath = os.path.join(root, f) - with open(fpath) as ff: - for fline in ff: - if "#include " in fline: - for comp1 in comps: - if comp1 != comp: - if comp1 + "/" in fline: - deps[comp].add(comp1) + cmakelists = os.path.join(src, comp, "CMakeLists.txt") + with open(cmakelists, "r") as f: + text = f.read() + i = text.find(init_str) + assert i >= 0 + text = text[i + len(init_str) :] + i = text.find(")") + assert i >= 0 + for comp1 in text[:i].strip().split(): + deps[comp].add(comp1) with open("depgraph.dot", "w") as f: f.write("digraph tket {\n") for comp, depcomps in deps.items():
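The tket_deps change above switches dependency-graph generation from scanning #include lines to reading each component's declared dependencies out of its CMakeLists.txt. The following is a minimal standalone sketch of that parsing step only; the helper name and the example string are illustrative and not part of the patch, and unlike the script (which asserts the marker is present) the sketch returns an empty set when it is missing.

    init_str = "list(APPEND DEPS_${COMP}"

    def deps_from_cmakelists(text):
        """Return the set of component names listed in a 'list(APPEND DEPS_${COMP} ...)' call."""
        i = text.find(init_str)
        if i < 0:
            return set()  # no dependency declaration found
        rest = text[i + len(init_str):]
        j = rest.find(")")
        if j < 0:
            return set()  # malformed file: no closing parenthesis
        return set(rest[:j].strip().split())

    # Example usage with an illustrative CMakeLists.txt fragment:
    example = "list(APPEND DEPS_${COMP}\n  OpType\n  Utils\n)"
    assert deps_from_cmakelists(example) == {"OpType", "Utils"}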